file_name (string, 4-69 chars) | prefix (string, 0-26.7k chars) | suffix (string, 0-24.8k chars) | middle (string, 0-2.12k chars) | fim_type (4 classes)
---|---|---|---|---|
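Each row below is one fill-in-the-middle (FIM) sample: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, plus a `fim_type` label for what was masked (`identifier_body`, `conditional_block`, `identifier_name`, or `random_line_split`). A minimal sketch of how such a row might be assembled into a training string follows; the `FimRow` type and the `<fim_*>` sentinel tokens are assumptions for illustration, not part of the dataset itself.

```rust
/// Hypothetical in-memory form of one dataset row.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // one of the 4 classes, e.g. "identifier_body"
}

/// Assemble a prefix-suffix-middle (PSM) training string.
/// The sentinel tokens are assumed placeholders; real ones depend on the tokenizer.
fn to_psm_sample(row: &FimRow) -> String {
    format!(
        "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
        row.prefix, row.suffix, row.middle
    )
}
```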
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the output.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
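/// For example, splitting a 76800-byte file into two equal parts uses
/// begin=1, end=38400 for the first part and begin=38401, end=76800 for the second.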
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` must not be smaller than `begin`, otherwise an overflow error occurs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the input file cannot be opened or read, or when the multi-part options are invalid
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the data from the input stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn encode_stream<R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
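        // At this point the output holds a header such as
        // "=ybegin part=1 line=128 size=76800 name=test1.bin" followed by
        // "=ypart begin=1 end=38400" (values here are illustrative only).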
        // begin/end are documented as only used when `parts > 1`; for a single
        // part, fall back to encoding the whole input.
        let (begin, end) = if self.parts == 1 {
            (1, length)
        } else {
            (self.begin, self.end)
        };
        rdr.seek(SeekFrom::Start(begin - 1))?;
        let mut remainder = (end - begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
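/// Bytes whose encoded value is NUL, CR, LF or '=' are written as a two-byte
/// escape sequence ('=' followed by the value plus 64), and a '.' in the first
/// column is doubled (NNTP dot-stuffing), so the output can be slightly longer
/// than the input; the buffer is sized with a 4% margin for this.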
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => {
v.push(DOT);
2
}
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
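// Worked example: input 0x00 encodes to 0x00 + 42 = 0x2A ('*'), which needs no
// escape; input 214 encodes to (214 + 42) mod 256 = 0 (NUL), a critical byte,
// so it is emitted as the escape pair ('=', 0 + 64) = ('=', 0x40).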
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() |
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
} | identifier_body |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the output.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` must not be smaller than `begin`, otherwise an overflow error occurs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the input file cannot be opened or read, or when the multi-part options are invalid
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the data from the input stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn encode_stream<R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
        // begin/end are documented as only used when `parts > 1`; for a single
        // part, fall back to encoding the whole input.
        let (begin, end) = if self.parts == 1 {
            (1, length)
        } else {
            (self.begin, self.end)
        };
        rdr.seek(SeekFrom::Start(begin - 1))?;
        let mut remainder = (end - begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => |
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| {
v.push(DOT);
2
} | conditional_block |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the output.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` must not be smaller than `begin`, otherwise an overflow error occurs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the input file cannot be opened or read, or when the multi-part options are invalid
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the data from the input stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn | <R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
        // begin/end are documented as only used when `parts > 1`; for a single
        // part, fall back to encoding the whole input.
        let (begin, end) = if self.parts == 1 {
            (1, length)
        } else {
            (self.begin, self.end)
        };
        rdr.seek(SeekFrom::Start(begin - 1))?;
        let mut remainder = (end - begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => {
v.push(DOT);
2
}
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| encode_stream | identifier_name |
minwinbase.rs | // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! This module defines the 32-Bit Windows Base APIs
use shared::basetsd::ULONG_PTR;
use shared::minwindef::{
    BOOL, BYTE, DWORD, FILETIME, HMODULE, LPVOID, MAX_PATH, UINT, ULONG, WORD,
};
use shared::ntstatus::{
    STATUS_ACCESS_VIOLATION, STATUS_ARRAY_BOUNDS_EXCEEDED, STATUS_BREAKPOINT,
    STATUS_CONTROL_C_EXIT, STATUS_DATATYPE_MISALIGNMENT, STATUS_FLOAT_DENORMAL_OPERAND,
    STATUS_FLOAT_DIVIDE_BY_ZERO, STATUS_FLOAT_INEXACT_RESULT, STATUS_FLOAT_INVALID_OPERATION,
    STATUS_FLOAT_OVERFLOW, STATUS_FLOAT_STACK_CHECK, STATUS_FLOAT_UNDERFLOW,
    STATUS_GUARD_PAGE_VIOLATION, STATUS_ILLEGAL_INSTRUCTION, STATUS_INTEGER_DIVIDE_BY_ZERO,
    STATUS_INTEGER_OVERFLOW, STATUS_INVALID_DISPOSITION, STATUS_INVALID_HANDLE,
    STATUS_IN_PAGE_ERROR, STATUS_NONCONTINUABLE_EXCEPTION, STATUS_PENDING,
    STATUS_POSSIBLE_DEADLOCK, STATUS_PRIVILEGED_INSTRUCTION, STATUS_SINGLE_STEP,
    STATUS_STACK_OVERFLOW,
};
use um::winnt::{
    CHAR, EXCEPTION_RECORD, HANDLE, LPSTR, LPWSTR, PCONTEXT, PRTL_CRITICAL_SECTION,
    PRTL_CRITICAL_SECTION_DEBUG, PVOID, RTL_CRITICAL_SECTION, RTL_CRITICAL_SECTION_DEBUG, WCHAR,
};
// MoveMemory
// CopyMemory
// FillMemory
// ZeroMemory
STRUCT!{struct SECURITY_ATTRIBUTES {
    nLength: DWORD,
    lpSecurityDescriptor: LPVOID,
    bInheritHandle: BOOL,
}}
pub type PSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
STRUCT!{struct OVERLAPPED_u_s {
    Offset: DWORD,
    OffsetHigh: DWORD,
}}
UNION!{union OVERLAPPED_u {
    [u32; 2] [u64; 1],
    s s_mut: OVERLAPPED_u_s,
    Pointer Pointer_mut: PVOID,
}}
STRUCT!{struct OVERLAPPED {
    Internal: ULONG_PTR,
    InternalHigh: ULONG_PTR,
    u: OVERLAPPED_u,
    hEvent: HANDLE,
}}
pub type LPOVERLAPPED = *mut OVERLAPPED;
STRUCT!{struct OVERLAPPED_ENTRY {
    lpCompletionKey: ULONG_PTR,
    lpOverlapped: LPOVERLAPPED,
    Internal: ULONG_PTR,
    dwNumberOfBytesTransferred: DWORD,
}}
pub type LPOVERLAPPED_ENTRY = *mut OVERLAPPED_ENTRY;
STRUCT!{struct SYSTEMTIME {
    wYear: WORD,
    wMonth: WORD,
    wDayOfWeek: WORD,
    wDay: WORD,
    wHour: WORD,
    wMinute: WORD,
    wSecond: WORD,
    wMilliseconds: WORD,
}}
pub type PSYSTEMTIME = *mut SYSTEMTIME;
pub type LPSYSTEMTIME = *mut SYSTEMTIME;
STRUCT!{struct WIN32_FIND_DATAA {
    dwFileAttributes: DWORD,
    ftCreationTime: FILETIME,
    ftLastAccessTime: FILETIME,
    ftLastWriteTime: FILETIME,
    nFileSizeHigh: DWORD,
    nFileSizeLow: DWORD,
    dwReserved0: DWORD,
    dwReserved1: DWORD,
    cFileName: [CHAR; MAX_PATH],
    cAlternateFileName: [CHAR; 14],
}}
pub type PWIN32_FIND_DATAA = *mut WIN32_FIND_DATAA;
pub type LPWIN32_FIND_DATAA = *mut WIN32_FIND_DATAA;
STRUCT!{struct WIN32_FIND_DATAW {
    dwFileAttributes: DWORD,
    ftCreationTime: FILETIME,
    ftLastAccessTime: FILETIME,
    ftLastWriteTime: FILETIME,
    nFileSizeHigh: DWORD,
    nFileSizeLow: DWORD,
    dwReserved0: DWORD,
    dwReserved1: DWORD,
    cFileName: [WCHAR; MAX_PATH],
    cAlternateFileName: [WCHAR; 14],
}}
pub type PWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
ENUM!{enum FINDEX_INFO_LEVELS {
    FindExInfoStandard,
    FindExInfoBasic,
    FindExInfoMaxInfoLevel,
}}
pub const FIND_FIRST_EX_CASE_SENSITIVE: DWORD = 0x00000001;
pub const FIND_FIRST_EX_LARGE_FETCH: DWORD = 0x00000002;
ENUM!{enum FINDEX_SEARCH_OPS {
    FindExSearchNameMatch,
    FindExSearchLimitToDirectories,
    FindExSearchLimitToDevices,
    FindExSearchMaxSearchOp,
}}
ENUM!{enum GET_FILEEX_INFO_LEVELS {
    GetFileExInfoStandard,
    GetFileExMaxInfoLevel,
}}
ENUM!{enum FILE_INFO_BY_HANDLE_CLASS {
    FileBasicInfo,
    FileStandardInfo,
    FileNameInfo,
    FileRenameInfo,
    FileDispositionInfo,
    FileAllocationInfo,
    FileEndOfFileInfo,
    FileStreamInfo,
    FileCompressionInfo,
    FileAttributeTagInfo,
    FileIdBothDirectoryInfo,
    FileIdBothDirectoryRestartInfo,
    FileIoPriorityHintInfo,
    FileRemoteProtocolInfo,
    FileFullDirectoryInfo,
    FileFullDirectoryRestartInfo,
    FileStorageInfo,
    FileAlignmentInfo,
    FileIdInfo,
    FileIdExtdDirectoryInfo,
    FileIdExtdDirectoryRestartInfo,
    FileDispositionInfoEx,
    FileRenameInfoEx,
    MaximumFileInfoByHandleClass,
}}
pub type PFILE_INFO_BY_HANDLE_CLASS = *mut FILE_INFO_BY_HANDLE_CLASS;
pub type CRITICAL_SECTION = RTL_CRITICAL_SECTION;
pub type PCRITICAL_SECTION = PRTL_CRITICAL_SECTION;
pub type LPCRITICAL_SECTION = PRTL_CRITICAL_SECTION;
pub type CRITICAL_SECTION_DEBUG = RTL_CRITICAL_SECTION_DEBUG;
pub type PCRITICAL_SECTION_DEBUG = PRTL_CRITICAL_SECTION_DEBUG;
pub type LPCRITICAL_SECTION_DEBUG = PRTL_CRITICAL_SECTION_DEBUG;
FN!{stdcall LPOVERLAPPED_COMPLETION_ROUTINE(
    dwErrorCode: DWORD,
    dwNumberOfBytesTransfered: DWORD,
    lpOverlapped: LPOVERLAPPED,
) -> ()}
pub const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 0x00000001;
pub const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002;
STRUCT!{struct PROCESS_HEAP_ENTRY_Block {
    hMem: HANDLE,
    dwReserved: [DWORD; 3],
}}
STRUCT!{struct PROCESS_HEAP_ENTRY_Region {
    dwCommittedSize: DWORD,
    dwUnCommittedSize: DWORD,
    lpFirstBlock: LPVOID,
    lpLastBlock: LPVOID,
}}
UNION!{union PROCESS_HEAP_ENTRY_u {
    [u32; 4] [u64; 3],
    Block Block_mut: PROCESS_HEAP_ENTRY_Block,
    Region Region_mut: PROCESS_HEAP_ENTRY_Region,
}}
STRUCT!{struct PROCESS_HEAP_ENTRY {
    lpData: PVOID,
    cbData: DWORD,
    cbOverhead: BYTE,
    iRegionIndex: BYTE,
    wFlags: WORD,
    u: PROCESS_HEAP_ENTRY_u,
}}
pub type LPPROCESS_HEAP_ENTRY = *mut PROCESS_HEAP_ENTRY;
pub type PPROCESS_HEAP_ENTRY = *mut PROCESS_HEAP_ENTRY;
pub const PROCESS_HEAP_REGION: WORD = 0x0001;
pub const PROCESS_HEAP_UNCOMMITTED_RANGE: WORD = 0x0002;
pub const PROCESS_HEAP_ENTRY_BUSY: WORD = 0x0004;
pub const PROCESS_HEAP_SEG_ALLOC: WORD = 0x0008;
pub const PROCESS_HEAP_ENTRY_MOVEABLE: WORD = 0x0010;
pub const PROCESS_HEAP_ENTRY_DDESHARE: WORD = 0x0020;
STRUCT!{struct REASON_CONTEXT_Detailed {
    LocalizedReasonModule: HMODULE,
    LocalizedReasonId: ULONG,
    ReasonStringCount: ULONG,
    ReasonStrings: *mut LPWSTR,
}} | REASON_CONTEXT_Reason {
    [u32; 4] [u64; 3],
    Detailed Detailed_mut: REASON_CONTEXT_Detailed,
    SimpleReasonString SimpleReasonString_mut: LPWSTR,
}}
STRUCT!{struct REASON_CONTEXT {
    Version: ULONG,
    Flags: DWORD,
    Reason: REASON_CONTEXT_Reason,
}}
pub type PREASON_CONTEXT = *mut REASON_CONTEXT;
pub const EXCEPTION_DEBUG_EVENT: DWORD = 1;
pub const CREATE_THREAD_DEBUG_EVENT: DWORD = 2;
pub const CREATE_PROCESS_DEBUG_EVENT: DWORD = 3;
pub const EXIT_THREAD_DEBUG_EVENT: DWORD = 4;
pub const EXIT_PROCESS_DEBUG_EVENT: DWORD = 5;
pub const LOAD_DLL_DEBUG_EVENT: DWORD = 6;
pub const UNLOAD_DLL_DEBUG_EVENT: DWORD = 7;
pub const OUTPUT_DEBUG_STRING_EVENT: DWORD = 8;
pub const RIP_EVENT: DWORD = 9;
FN!{stdcall PTHREAD_START_ROUTINE(
    lpThreadParameter: LPVOID,
) -> DWORD}
pub type LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
FN!{stdcall PENCLAVE_ROUTINE(
    lpThreadParameter: LPVOID,
) -> DWORD}
pub type LPENCLAVE_ROUTINE = PENCLAVE_ROUTINE;
STRUCT!{struct EXCEPTION_DEBUG_INFO {
    ExceptionRecord: EXCEPTION_RECORD,
    dwFirstChance: DWORD,
}}
pub type LPEXCEPTION_DEBUG_INFO = *mut EXCEPTION_DEBUG_INFO;
STRUCT!{struct CREATE_THREAD_DEBUG_INFO {
    hThread: HANDLE,
    lpThreadLocalBase: LPVOID,
    lpStartAddress: LPTHREAD_START_ROUTINE,
}}
pub type LPCREATE_THREAD_DEBUG_INFO = *mut CREATE_THREAD_DEBUG_INFO;
STRUCT!{struct CREATE_PROCESS_DEBUG_INFO {
    hFile: HANDLE,
    hProcess: HANDLE,
    hThread: HANDLE,
    lpBaseOfImage: LPVOID,
    dwDebugInfoFileOffset: DWORD,
    nDebugInfoSize: DWORD,
    lpThreadLocalBase: LPVOID,
    lpStartAddress: LPTHREAD_START_ROUTINE,
    lpImageName: LPVOID,
    fUnicode: WORD,
}}
pub type LPCREATE_PROCESS_DEBUG_INFO = *mut CREATE_PROCESS_DEBUG_INFO;
STRUCT!{struct EXIT_THREAD_DEBUG_INFO {
    dwExitCode: DWORD,
}}
pub type LPEXIT_THREAD_DEBUG_INFO = *mut EXIT_THREAD_DEBUG_INFO;
STRUCT!{struct EXIT_PROCESS_DEBUG_INFO {
    dwExitCode: DWORD,
}}
pub type LPEXIT_PROCESS_DEBUG_INFO = *mut EXIT_PROCESS_DEBUG_INFO;
STRUCT!{struct LOAD_DLL_DEBUG_INFO {
    hFile: HANDLE,
    lpBaseOfDll: LPVOID,
    dwDebugInfoFileOffset: DWORD,
    nDebugInfoSize: DWORD,
    lpImageName: LPVOID,
    fUnicode: WORD,
}}
pub type LPLOAD_DLL_DEBUG_INFO = *mut LOAD_DLL_DEBUG_INFO;
STRUCT!{struct UNLOAD_DLL_DEBUG_INFO {
    lpBaseOfDll: LPVOID,
}}
pub type LPUNLOAD_DLL_DEBUG_INFO = *mut UNLOAD_DLL_DEBUG_INFO;
STRUCT!{struct OUTPUT_DEBUG_STRING_INFO {
    lpDebugStringData: LPSTR,
    fUnicode: WORD,
    nDebugStringLength: WORD,
}}
pub type LPOUTPUT_DEBUG_STRING_INFO = *mut OUTPUT_DEBUG_STRING_INFO;
STRUCT!{struct RIP_INFO {
    dwError: DWORD,
    dwType: DWORD,
}}
pub type LPRIP_INFO = *mut RIP_INFO;
UNION!{union DEBUG_EVENT_u {
    [u32; 21] [u64; 20],
    Exception Exception_mut: EXCEPTION_DEBUG_INFO,
    CreateThread CreateThread_mut: CREATE_THREAD_DEBUG_INFO,
    CreateProcessInfo CreateProcessInfo_mut: CREATE_PROCESS_DEBUG_INFO,
    ExitThread ExitThread_mut: EXIT_THREAD_DEBUG_INFO,
    ExitProcess ExitProcess_mut: EXIT_PROCESS_DEBUG_INFO,
    LoadDll LoadDll_mut: LOAD_DLL_DEBUG_INFO,
    UnloadDll UnloadDll_mut: UNLOAD_DLL_DEBUG_INFO,
    DebugString DebugString_mut: OUTPUT_DEBUG_STRING_INFO,
    RipInfo RipInfo_mut: RIP_INFO,
}}
STRUCT!{struct DEBUG_EVENT {
    dwDebugEventCode: DWORD,
    dwProcessId: DWORD,
    dwThreadId: DWORD,
    u: DEBUG_EVENT_u,
}}
pub type LPDEBUG_EVENT = *mut DEBUG_EVENT;
pub type LPCONTEXT = PCONTEXT;
pub const STILL_ACTIVE: DWORD = STATUS_PENDING as u32;
pub const EXCEPTION_ACCESS_VIOLATION: DWORD = STATUS_ACCESS_VIOLATION as u32;
pub const EXCEPTION_DATATYPE_MISALIGNMENT: DWORD = STATUS_DATATYPE_MISALIGNMENT as u32;
pub const EXCEPTION_BREAKPOINT: DWORD = STATUS_BREAKPOINT as u32;
pub const EXCEPTION_SINGLE_STEP: DWORD = STATUS_SINGLE_STEP as u32;
pub const EXCEPTION_ARRAY_BOUNDS_EXCEEDED: DWORD = STATUS_ARRAY_BOUNDS_EXCEEDED as u32;
pub const EXCEPTION_FLT_DENORMAL_OPERAND: DWORD = STATUS_FLOAT_DENORMAL_OPERAND as u32;
pub const EXCEPTION_FLT_DIVIDE_BY_ZERO: DWORD = STATUS_FLOAT_DIVIDE_BY_ZERO as u32;
pub const EXCEPTION_FLT_INEXACT_RESULT: DWORD = STATUS_FLOAT_INEXACT_RESULT as u32;
pub const EXCEPTION_FLT_INVALID_OPERATION: DWORD = STATUS_FLOAT_INVALID_OPERATION as u32;
pub const EXCEPTION_FLT_OVERFLOW: DWORD = STATUS_FLOAT_OVERFLOW as u32;
pub const EXCEPTION_FLT_STACK_CHECK: DWORD = STATUS_FLOAT_STACK_CHECK as u32;
pub const EXCEPTION_FLT_UNDERFLOW: DWORD = STATUS_FLOAT_UNDERFLOW as u32;
pub const EXCEPTION_INT_DIVIDE_BY_ZERO: DWORD = STATUS_INTEGER_DIVIDE_BY_ZERO as u32;
pub const EXCEPTION_INT_OVERFLOW: DWORD = STATUS_INTEGER_OVERFLOW as u32;
pub const EXCEPTION_PRIV_INSTRUCTION: DWORD = STATUS_PRIVILEGED_INSTRUCTION as u32;
pub const EXCEPTION_IN_PAGE_ERROR: DWORD = STATUS_IN_PAGE_ERROR as u32;
pub const EXCEPTION_ILLEGAL_INSTRUCTION: DWORD = STATUS_ILLEGAL_INSTRUCTION as u32;
pub const EXCEPTION_NONCONTINUABLE_EXCEPTION: DWORD = STATUS_NONCONTINUABLE_EXCEPTION as u32;
pub const EXCEPTION_STACK_OVERFLOW: DWORD = STATUS_STACK_OVERFLOW as u32;
pub const EXCEPTION_INVALID_DISPOSITION: DWORD = STATUS_INVALID_DISPOSITION as u32;
pub const EXCEPTION_GUARD_PAGE: DWORD = STATUS_GUARD_PAGE_VIOLATION as u32;
pub const EXCEPTION_INVALID_HANDLE: DWORD = STATUS_INVALID_HANDLE as u32;
pub const EXCEPTION_POSSIBLE_DEADLOCK: DWORD = STATUS_POSSIBLE_DEADLOCK as u32;
pub const CONTROL_C_EXIT: DWORD = STATUS_CONTROL_C_EXIT as u32;
pub const LMEM_FIXED: UINT = 0x0000;
pub const LMEM_MOVEABLE: UINT = 0x0002;
pub const LMEM_NOCOMPACT: UINT = 0x0010;
pub const LMEM_NODISCARD: UINT = 0x0020;
pub const LMEM_ZEROINIT: UINT = 0x0040;
pub const LMEM_MODIFY: UINT = 0x0080;
pub const LMEM_DISCARDABLE: UINT = 0x0F00;
pub const LMEM_VALID_FLAGS: UINT = 0x0F72;
pub const LMEM_INVALID_HANDLE: UINT = 0x8000;
pub const LHND: UINT = LMEM_MOVEABLE | LMEM_ZEROINIT;
pub const LPTR: UINT = LMEM_FIXED | LMEM_ZEROINIT;
pub const NONZEROLHND: UINT = LMEM_MOVEABLE;
pub const NONZEROLPTR: UINT = LMEM_FIXED;
// LocalDiscard
pub const LMEM_DISCARDED: UINT = 0x4000;
pub const LMEM_LOCKCOUNT: UINT = 0x00FF;
pub const NUMA_NO_PREFERRED_NODE: DWORD = -1i32 as u32; | UNION!{union | random_line_split
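The `UNION!` declarations above pair each variant with generated unsafe accessors (for example `Exception`/`Exception_mut` on `DEBUG_EVENT_u`). A small sketch of reading one variant follows; it assumes the winapi 0.3 crate compiled for a Windows target.

```rust
// Sketch only: assumes the winapi 0.3 crate on a Windows target.
use winapi::um::minwinbase::{DEBUG_EVENT, EXCEPTION_DEBUG_EVENT};

/// Return the exception code of an exception debug event, if it is one.
unsafe fn exception_code(ev: &DEBUG_EVENT) -> Option<u32> {
    if ev.dwDebugEventCode == EXCEPTION_DEBUG_EVENT {
        // Exception() is the accessor UNION! generates for the
        // `Exception Exception_mut: EXCEPTION_DEBUG_INFO` pair.
        let info = ev.u.Exception();
        Some(info.ExceptionRecord.ExceptionCode)
    } else {
        None
    }
}
```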
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
/// All incoming connection will go through proxy
All,
/// Only http connections will go through proxy
Http,
/// Only https connections will go through proxy
Https,
/// No connection will go through this proxy
None,
/// A custom intercept
Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
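// Example sketch (the host name is an assumed placeholder): a custom
// intercept that proxies only a single host could be built as
// `let only_host: Intercept = (|uri: &Uri| uri.host() == Some("internal.example")).into();`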
/// A Proxy struct
#[derive(Clone, Debug)]
pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
/// Create a new secured ProxyConnector
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
/// Create a new unsecured ProxyConnector
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme() != Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn | (&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
C: Service<Request = Uri, Error = io::Error> + 'static,
C::Future: 'static,
<C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
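// HTTPS requests are tunneled through the proxy via an HTTP CONNECT
// handshake, so the proxy only learns the target host and port.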
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
// Without TLS there is no benefit from tunneling, as the proxy can read
// the plaintext traffic anyway; tunneling would only waste the proxy's
// resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
| match_proxy | identifier_name |
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
/// All incoming connection will go through proxy
All,
/// Only http connections will go through proxy
Http,
/// Only https connections will go through proxy
Https,
/// No connection will go through this proxy
None,
/// A custom intercept
Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool |
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
/// A Proxy strcut
#[derive(Clone, Debug)]
pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
/// Create a new secured ProxyConnector
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
/// Create a new unsecured ProxyConnector
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme()!= Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn match_proxy(&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
    C: Service<Request = Uri, Error = io::Error> + 'static,
    C::Future: 'static,
    <C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
                // Without TLS there is no benefit to tunneling, since the proxy
                // can read the plaintext traffic anyway; tunneling would only
                // tie up the proxy's resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
| {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
} | identifier_body |
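A quick aside on the `matches` body filled in above: it dispatches purely on the URI scheme, with the `Custom` variant delegating to a user-supplied closure. Below is a minimal std-only sketch of the same pattern; `SchemeOnly` and `Filter` are stand-ins I am assuming in place of hyper's `Uri` and the crate's `Intercept`, so the snippet stays self-contained and runnable:

use std::sync::Arc;

// Stand-in for hyper's Uri: only the scheme matters for this dispatch.
type SchemeOnly = Option<&'static str>;

enum Filter {
    All,
    Http,
    Https,
    None,
    Custom(Arc<dyn Fn(SchemeOnly) -> bool + Send + Sync>),
}

impl Filter {
    // Mirrors Intercept::matches: scheme-based dispatch, closure fallback.
    fn matches(&self, scheme: SchemeOnly) -> bool {
        match (self, scheme) {
            (Filter::All, _) => true,
            (Filter::Http, Some("http")) => true,
            (Filter::Https, Some("https")) => true,
            (Filter::Custom(f), _) => f(scheme),
            _ => false,
        }
    }
}

fn main() {
    let custom = Filter::Custom(Arc::new(|s| s == Some("ftp")));
    assert!(Filter::All.matches(Some("http")));
    assert!(!Filter::Http.matches(Some("https")));
    assert!(Filter::Https.matches(Some("https")));
    assert!(!Filter::None.matches(Some("http")));
    assert!(custom.matches(Some("ftp")));
}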
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
    /// All incoming connections will go through the proxy
    All,
    /// Only HTTP connections will go through the proxy
    Http,
    /// Only HTTPS connections will go through the proxy
    Https,
    /// No connection will go through this proxy
    None,
    /// A custom intercept
    Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
/// A Proxy struct | pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
    /// Create a new secured `ProxyConnector`
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
    /// Create a new unsecured `ProxyConnector` (no TLS when tunneling)
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme()!= Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn match_proxy(&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
    C: Service<Request = Uri, Error = io::Error> + 'static,
    C::Future: 'static,
    <C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
                // Without TLS there is no benefit to tunneling, since the proxy
                // can read the plaintext traffic anyway; tunneling would only
                // tie up the proxy's resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
} | #[derive(Clone, Debug)] | random_line_split |
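One small design note on the file above: `Custom` wraps a bare closure, and closures implement neither `Debug` nor `PartialEq`, which is why the crate hand-writes a `fmt::Debug` impl that just prints "_". A sketch of that pattern in isolation (the `Pred` type here is mine, purely for illustration):

use std::fmt;
use std::sync::Arc;

struct Pred(Arc<dyn Fn(u32) -> bool + Send + Sync>);

// derive(Debug) is impossible here because the boxed closure has no Debug
// impl of its own, so emit a fixed placeholder the way hyper-proxy does.
impl fmt::Debug for Pred {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "_")
    }
}

fn main() {
    let p = Pred(Arc::new(|x| x > 2));
    assert_eq!(format!("{:?}", p), "_");
    assert!((p.0)(3));
}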
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 |
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn: maps a metadata value to a bucket id, including how missing data is handled.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population.
/// Note: as implemented this returns twice the area under the Lorenz curve
/// (i.e. 1 - the textbook Gini); the unit tests below pin this convention.
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `data` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
    let k = k_opt.unwrap_or(data.len()).min(data.len());
    let total: f32 = data[..k].iter().sum();
    total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn test_get_percentiles() {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
}
| {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
    // Sort descending (best scores first) to compute the ideal DCG
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
} | identifier_body |
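The `ndcg` body shown as the middle above computes the DCG of the list as given, then sorts the grades descending to get the ideal DCG for the same labels. A worked check, assuming the same (2^s - 1) gain and log2(i + 2) discount as the code; this is a standalone sketch, not part of the module:

fn dcg(scores: &[f32], k: usize) -> f64 {
    (0..k)
        .map(|i| ((2f64).powi(scores[i] as i32) - 1.) / (2. + i as f64).log2())
        .sum()
}

fn main() {
    // Grades in ranked order: DCG = 15/log2(2) + 0/log2(3) + 3/log2(4) = 16.5
    let raw = dcg(&[4.0, 0.0, 2.0], 3);
    assert!((raw - 16.5).abs() < 1e-9);
    // Ideal order [4, 2, 0]: 15 + 3/log2(3) ~= 16.892789
    let ideal = dcg(&[4.0, 2.0, 0.0], 3);
    assert!((ideal - 16.892789).abs() < 1e-5);
    // NDCG = 16.5 / 16.892789 ~= 0.97675, the same machinery test_ndcg exercises
    assert!((raw / ideal - 0.97675).abs() < 1e-4);
}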
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
    // Sort descending (best scores first) to compute the ideal DCG
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn: maps a metadata value to a bucket id, including how missing data is handled.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population.
/// Note: as implemented this returns twice the area under the Lorenz curve
/// (i.e. 1 - the textbook Gini); the unit tests below pin this convention.
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `data` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
    let k = k_opt.unwrap_or(data.len()).min(data.len());
    let total: f32 = data[..k].iter().sum();
    total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn | () {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
}
| test_get_percentiles | identifier_name |
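For `get_err`, the cascade model is easiest to see with a hand computation. With grades [4, 0, 2] and max grade 4, the per-rank stop probabilities are [15/16, 0, 3/16]. A self-contained sketch reproducing the code's arithmetic, agreeing with the 0.94140625 that test_err pins at k = 3:

fn err(scores: &[f32]) -> f32 {
    let max = scores.iter().cloned().fold(f32::MIN, f32::max);
    let mut err = 0.0;
    let mut p = 1.0; // probability the user reaches this rank
    for (i, &s) in scores.iter().enumerate() {
        let r = (2f32.powf(s) - 1.) / 2f32.powf(max); // relevance = stop prob
        err += p * r / (i as f32 + 1.);
        p *= 1. - r;
    }
    err
}

fn main() {
    // rank 1 contributes 0.9375, rank 2 contributes 0,
    // rank 3 contributes 0.0625 * 0.1875 / 3 = 0.00390625
    assert!((err(&[4.0, 0.0, 2.0]) - 0.94140625).abs() < 1e-6);
}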
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
    // Sort descending (best scores first) to compute the ideal DCG
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn: maps a metadata value to a bucket id, including how missing data is handled.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population.
/// Note: as implemented this returns twice the area under the Lorenz curve
/// (i.e. 1 - the textbook Gini); the unit tests below pin this convention.
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize]; | left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `data` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
    let k = k_opt.unwrap_or(data.len()).min(data.len());
    let total: f32 = data[..k].iter().sum();
    total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn test_get_percentiles() {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
} | let right = vals[pos.ceil() as usize];
let delta = pos.fract(); | random_line_split |
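The `interpolate` helper above is the standard plotting-position formula; with the default `interpolate_arg` of 0.5, the position collapses to n * p/100 - 0.5. A simplified sketch with that default baked in (the percentile is taken as f32 here for brevity, an assumption of this sketch), reproducing the Wikipedia case from test_interpolate:

fn interpolate(vals: &[f32], percentile: f32) -> f32 {
    // pos = (n + 1 - 2c) * p/100 + c - 1 with c = 0.5  =>  n * p/100 - 0.5
    let pos = vals.len() as f32 * percentile / 100. - 0.5;
    if pos <= 0. {
        vals[0]
    } else if pos >= (vals.len() - 1) as f32 {
        vals[vals.len() - 1]
    } else {
        let (lo, hi) = (pos.floor() as usize, pos.ceil() as usize);
        vals[lo] + (vals[hi] - vals[lo]) * pos.fract()
    }
}

fn main() {
    let v = [15.0, 20.0, 35.0, 40.0, 50.0];
    assert_eq!(interpolate(&v, 40.0), 27.5); // pos = 1.5, halfway between 20 and 35
    assert_eq!(interpolate(&v, 5.0), 15.0);  // clamped at the low end
    assert_eq!(interpolate(&v, 95.0), 50.0); // clamped at the high end
}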
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
    // Sort descending (best scores first) to compute the ideal DCG
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 |
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn: maps a metadata value to a bucket id, including how missing data is handled.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population.
/// Note: as implemented this returns twice the area under the Lorenz curve
/// (i.e. 1 - the textbook Gini); the unit tests below pin this convention.
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `data` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
    let k = k_opt.unwrap_or(data.len()).min(data.len());
    let total: f32 = data[..k].iter().sum();
    total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn test_get_percentiles() {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
}
| {
return weights;
} | conditional_block |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering};
static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2...
// w_data = w_1 comma w_2...
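// e.g. "1\t[1,2]\t{a,b}" parses to a single object at block height 1;
// see the fixture in test_load_raw_obj below.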
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
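// Safety: memmap2 marks Mmap::map unsafe because the mapping is undefined
// behavior if the underlying file is mutated while mapped; the pk file is
// only read here.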
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn | (path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
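// Serializes with bincode and wraps the result in a Snappy frame;
// binary_decode below reverses both steps.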
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
}
| pk_path | identifier_name |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering}; | }
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2...
// w_data = w_1 comma w_2...
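// e.g. "1\t[1,2]\t{a,b}" parses to a single object at block height 1;
// see the fixture in test_load_raw_obj below.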
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
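// Safety: memmap2 marks Mmap::map unsafe because the mapping is undefined
// behavior if the underlying file is mutated while mapped; the pk file is
// only read here.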
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
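// Serializes with bincode and wraps the result in a Snappy frame;
// binary_decode below reverses both steps.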
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
} | static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst)) | random_line_split |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering};
static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2...
// w_data = w_1 comma w_2...
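// e.g. "1\t[1,2]\t{a,b}" parses to a single object at block height 1;
// see the fixture in test_load_raw_obj below.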
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> |
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
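// Serializes with bincode and wraps the result in a Snappy frame;
// binary_decode below reverses both steps.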
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
}
| {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
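// Safety: memmap2 marks Mmap::map unsafe because the mapping is undefined
// behavior if the underlying file is mutated while mapped; the pk file is
// only read here.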
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
} | identifier_body |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering};
static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2...
// w_data = w_1 comma w_2...
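// e.g. "1\t[1,2]\t{a,b}" parses to a single object at block height 1;
// see the fixture in test_load_raw_obj below.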
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() |
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
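// Safety: memmap2 marks Mmap::map unsafe because the mapping is undefined
// behavior if the underlying file is mutated while mapped; the pk file is
// only read here.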
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
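// Serializes with bincode and wraps the result in a Snappy frame;
// binary_decode below reverses both steps.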
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
}
| {
continue;
} | conditional_block |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
// lookup_table_bits is asserted to be in 1..32 above, so the zero-length
// case cannot occur and the table always has 1 << lookup_table_bits slots.
let lookup_table_len = 1 << lookup_table_bits;
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
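// Decoding is two-phase: up to `len_bits` bits index the lookup table
// directly, while codes longer than the table width fall back to a scan of
// the sorted `long_codes` list in `find_long_code`.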
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
// `read` is already non-zero after the first read, so test the incremental
// read itself for EOF.
if r.1 == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> | };
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
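// Canonical code allocation: the next code of a given length is the previous
// one with its lowest bit set, or, once that is taken, the next code of
// length `len - 1` extended by a 0 bit (hence the recursion).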
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
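// A code shorter than the table width owns every slot whose low `code.len`
// bits equal the code, so the entry is replicated at a stride of 1 << len.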
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | {
let code_straight = try!(self.next_code(len));
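// Codes are allocated MSB-first but the bitstream is consumed LSB-first, so
// reverse the low `len` bits of the codeword.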
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true | identifier_body |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
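/// Builder that assigns canonical Huffman codes in the order `create_code`
/// is called. The example below is an illustrative sketch (it is not part of
/// the original sources) mirroring the `test_decode_` helper in the tests;
/// `reader` stands for any `bitstream::BitRead` implementation.
///
/// ```rust,ignore
/// // Code lengths per symbol value, as in the `decode_1` test.
/// let code_lens = [2usize, 4, 4, 4, 4, 2, 3, 3];
/// let mut builder = HuffmanDecoder::builder(8);
/// for (symbol, &len) in code_lens.iter().enumerate() {
///     builder.create_code(symbol as u32, len).unwrap();
/// }
/// let decoder = builder.build();
/// // Each decode call reads one codeword and yields its symbol value.
/// let symbol = decoder.decode(&mut reader).unwrap();
/// ```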
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
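// Worked example of the canonical assignment implemented below, taken from
// the `next_code_2` test: requesting lengths 3, 1, 2, 3 in that order yields
// 0b000, 0b1, 0b01 and 0b001. Each length tracks its lowest unused code in
// `cur_codes`; when both codes at a length are taken, a fresh prefix is
// borrowed from the next shorter length and shifted left by one bit.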
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
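/// Fills every slot whose low `code.len` bits equal the code. Illustrative
/// example (not from the original source): in a 3-bit table, the 2-bit code
/// `0b01` is written at indices `0b001` and `0b101`, starting at the code
/// itself and stepping by `1 << code.len`.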
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
} | } | random_line_split |
|
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
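/// Decoding is two-phase: up to `lookup_table.len_bits` bits are used to
/// index the direct lookup table, and codes longer than the table fall
/// through to a linear scan of `long_codes`. Any bits read beyond the
/// matched code length are pushed back to the reader via `unread_u32_bits`.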
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct | {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | HuffmanDecoderBuilder | identifier_name |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read |
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
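/// Illustrative example (not from the original source): truncating
/// `Code { code: 0b1011, len: 4 }` to `len = 2` keeps the two
/// least-significant bits and yields `0b11`. Codes are stored bit-reversed,
/// so the low bits are the ones matched first by the reader.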
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
} | conditional_block |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` on the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs on.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
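// Illustrative configuration for this source; the field names come from
// `Config` above and the source name from the `typetag` annotation below,
// but the exact TOML shape shown here is an assumption:
//
// [sources.my_k8s_logs]
// type = "kubernetes_logs"
// self_node_name = "${VECTOR_SELF_NODE_NAME}"
// auto_partial_merge = true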
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplified
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed",?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error",?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error",?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event |
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
}
| {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
} | identifier_body |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` on the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs on.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplified
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed",?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn | <O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error",?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error",?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
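/// Builds the log event for a single line read from a pod log file. Sketch
/// of the resulting shape (illustrative; the message and source-type keys
/// are resolved through `event::log_schema()` at runtime):
/// `{ message: "<raw line>", source_type: "kubernetes_logs", file: "<path>" }`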
fn create_event(line: Bytes, file: &str) -> Event {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
}
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
}
| run | identifier_name |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` on the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs on.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplified
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed",?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file); | emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error",?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error",?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
}
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
} | if annotator.annotate(&mut event, &file).is_none() { | random_line_split |
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Span should only be
//! created through the context, and archived into the context after the span
//! has finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is to identify the [Span] for crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close the span. A span must not be used after it has been finalized.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
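// Note on the flow above: a synchronous span is moved from `active` to
// `finalized` together with its `SpanObject` in one step, while an async
// span is first parked in `finalized` with `obj: None` and completed later
// through `finalize_async_span`, which also attaches any pending segment
// reference carried over for `continued`.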
/// TracingContext is the context of the tracing process. Span should only be
/// created through the context, and archived into the context after the span
/// has finished.
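///
/// Illustrative usage (a sketch, not from the original source: the tracer
/// constructor name is an assumption, and spans are archived when they go
/// out of scope):
///
/// ```rust,ignore
/// let mut ctx = tracer.create_trace_context();
/// {
///     let _span = ctx.create_entry_span("GET /users");
///     // ... handle the request ...
/// }
/// // All spans are archived; the context can now be collected into a segment.
/// ```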
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is used to identify the [Span] within the crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// The context should be propagated via the `sw8` header of the HTTP request
/// in encoded form. You can retrieve the decoded context with
/// `skywalking::trace::propagation::decoder::decode_propagation`.
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which is created when the tracing context
/// generates a new span for a function invocation.
///
/// Currently, this SDK supports RPC calls, so `remote_peer` must be set.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Common logic for creating exit and local spans.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait until all async spans created by [Span::prepare_for_async] are dropped.
pub fn wait(self) {
self.wg.clone().wait();
}
/// Converts the tracing context into a segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async spans haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn | (&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross-thread context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from the current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| peek_active_span_id | identifier_name |
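For orientation, a minimal usage sketch of the API in the file above. The `Tracer` type's `create_trace_context` constructor is an assumption from the wider crate (only `WeakTracer` appears in this file); `create_entry_span` and `create_exit_span` are the methods defined above.

// Sketch only, not part of the dataset row above; `create_trace_context`
// is an assumed constructor from the wider crate.
fn handle_request(tracer: &Tracer) {
    let mut ctx = tracer.create_trace_context();
    {
        // Entry span: the request enters this service (parent span id is -1).
        let _entry = ctx.create_entry_span("GET /users");
        // Exit span: wraps a downstream call; `remote_peer` must be set.
        let _exit = ctx.create_exit_span("user-db/query", "10.0.0.5:5432");
        // Dropping a Span finalizes it into the context's SpanStack.
    }
    // Dropping the context reports it via Tracer::finalize_context (see Drop).
    drop(ctx);
}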
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Spans should only be
//! created through the context, and are archived into the context after the
//! span is finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is used to identify the [Span] within the crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close the span. A closed span must not be used after finalize is called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Spans should only be
/// created through the context, and are archived into the context after the
/// span is finished.
#[must_use = "call `create_entry_span` after the `TracingContext` is created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is used to identify the [Span] within the crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// The context should be propagated via the `sw8` header of the HTTP request
/// in encoded form. You can retrieve the decoded context with
/// `skywalking::trace::propagation::decoder::decode_propagation`.
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which is created when the tracing context
/// generates a new span for a function invocation.
///
/// Currently, this SDK supports RPC calls, so `remote_peer` must be set.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Common logic for creating exit and local spans.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait until all async spans created by [Span::prepare_for_async] are dropped.
pub fn wait(self) {
self.wg.clone().wait();
}
/// Converts the tracing context into a segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async spans haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer |
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross-thread context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from the current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| {
self.tracer.upgrade().expect("Tracer has dropped")
} | identifier_body |
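The `capture`/`continued` pair above is the cross-thread propagation mechanism: `capture` snapshots the ids of the current segment, and `continued` records a CrossThread reference on the child's active span. A hedged sketch of how they compose (again assuming `Tracer::create_trace_context` from the wider crate):

// Sketch only; assumes Tracer is cloneable/sendable across threads.
fn spawn_worker(parent_ctx: &TracingContext, tracer: Tracer) {
    // Snapshot trace id, segment id, active span id, and endpoint name.
    let snapshot = parent_ctx.capture();
    std::thread::spawn(move || {
        let mut ctx = tracer.create_trace_context();
        // An active span must exist before continued() can attach the ref.
        let _span = ctx.create_entry_span("worker");
        // Records a CrossThread SegmentReference on the active span.
        ctx.continued(snapshot);
    });
}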
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Spans should only be
//! created through the context, and are archived into the context after the
//! span is finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is used to identify the [Span] within the crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close the span. A closed span must not be used after finalize is called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Spans should only be
/// created through the context, and are archived into the context after the
/// span is finished.
#[must_use = "call `create_entry_span` after the `TracingContext` is created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service | &self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is used to identify the [Span] within the crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// The context should be propagated via the `sw8` header of the HTTP request
/// in encoded form. You can retrieve the decoded context with
/// `skywalking::trace::propagation::decoder::decode_propagation`.
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which is created when the tracing context
/// generates a new span for a function invocation.
///
/// Currently, this SDK supports RPC calls, so `remote_peer` must be set.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Common logic for creating exit and local spans.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait until all async spans created by [Span::prepare_for_async] are dropped.
pub fn wait(self) {
self.wg.clone().wait();
}
/// Converts the tracing context into a segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async spans haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross-thread context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from the current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
} | }
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str { | random_line_split |
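The `WaitGroup` plus `finalize_async_span` machinery above exists so a span can be finished on another thread or task. A sketch of the intended flow; `prepare_for_async` lives in span.rs, so its exact signature here is an assumption:

// Sketch only; `prepare_for_async` is assumed to consume the span, return an
// async handle, and increment the context's WaitGroup.
fn with_async_work(tracer: &Tracer) {
    let mut ctx = tracer.create_trace_context();
    let span = ctx.create_entry_span("async-op");
    let async_span = span.prepare_for_async();
    std::thread::spawn(move || {
        // ... background work ...
        // Dropping the handle fills the SpanObject via finalize_async_span
        // and decrements the WaitGroup.
        drop(async_span);
    });
    // Blocks until every async handle has been dropped (see wait() above).
    ctx.wait();
}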
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Spans should only be
//! created through the context, and are archived into the context after the
//! span is finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is used to identify the [Span] within the crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// Used by [TracingContext::continued].
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close the span. A closed span must not be used after finalize is called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Spans should only be
/// created through the context, and are archived into the context after the
/// span is finished.
#[must_use = "call `create_entry_span` after the `TracingContext` is created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is used to identify the [Span] within the crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// The context should be propagated via the `sw8` header of the HTTP request
/// in encoded form. You can retrieve the decoded context with
/// `skywalking::trace::propagation::decoder::decode_propagation`.
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which is created when the tracing context
/// generates a new span for a function invocation.
///
/// Currently, this SDK supports RPC calls, so `remote_peer` must be set.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panics if no entry span exists.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Common logic for creating exit and local spans.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() |
}
/// Wait until all async spans created by [Span::prepare_for_async] are dropped.
pub fn wait(self) {
self.wg.clone().wait();
}
/// Converts the tracing context into a segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async spans haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross threads context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from the current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
} | conditional_block |
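Note the locking convention used throughout `SpanStack`: every acquire is a non-blocking `try_read`/`try_write` followed by `expect(LOCK_MSG)`, so unexpected contention panics immediately instead of blocking or deadlocking. A self-contained sketch of the same pattern; the message text is an assumption (the real constant lives in error.rs):

use parking_lot::RwLock;

const LOCK_MSG: &str = "should not be locked concurrently"; // assumed text

#[derive(Default)]
struct Stack {
    items: RwLock<Vec<u32>>,
}

impl Stack {
    // Mirrors SpanStack::active_mut: panic on contention rather than block.
    fn push(&self, v: u32) {
        self.items.try_write().expect(LOCK_MSG).push(v);
    }

    fn last(&self) -> Option<u32> {
        self.items.try_read().expect(LOCK_MSG).last().copied()
    }
}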
obj.rs | use std::error::Error;
use std::f32::consts::PI;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use gleam::gl;
use gleam::gl::types::{GLint, GLsizei};
use image::GenericImageView;
use super::Context;
use error::io_error;
use matrix::{identity, matmul, rotate_x, rotate_y, scale, translate, vec2, vec3, Vec2, Vec3};
use render::{get_tex_const, Color, Drawable};
#[derive(Debug)]
pub struct Face<T> {
indices: Vec<FaceIndex<T>>,
}
fn face<T>(indices: Vec<FaceIndex<T>>) -> Face<T> {
Face { indices }
}
#[derive(Debug)]
pub struct FaceIndex<T> {
vertex_index: T,
texture_index: Option<T>,
normal_index: Option<T>,
}
impl<T> FromStr for FaceIndex<T>
where
T: FromStr + Default,
<T as FromStr>::Err:'static + Error + Send + Sync,
{
type Err = io::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn new(name: &str) -> Self {
Group {
name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
let y: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
let z: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
let y: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
let z: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
let y: f32 = tokens.next().unwrap_or("0").parse().unwrap_or(0.0);
texture_coords.push(vec2(x, y));
}
"f" => |
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = gl.gen_textures(1)[0];
// Get the texture index as a glenum
let tex_enum = get_tex_const(self.cur_texture);
gl.active_texture(tex_enum);
gl.bind_texture(gl::TEXTURE_2D, texture);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
gl.tex_image_2d(
gl::TEXTURE_2D,
0,
gl::RGB as i32,
width as i32,
height as i32,
0,
gl::RGB,
gl::UNSIGNED_BYTE,
Some(&tex_image),
);
gl.generate_mipmap(gl::TEXTURE_2D);
gl.tex_parameter_i(
gl::TEXTURE_2D,
gl::TEXTURE_MIN_FILTER,
gl::LINEAR_MIPMAP_LINEAR as i32,
);
}
/// Draws the object
// Return groups
fn draw(&self, ctx: &Context) {
let gl = &ctx.gl;
let mv_location = gl.get_uniform_location(ctx.program, "uMVMatrix");
let m_matrix = identity();
let v_matrix = matmul(
rotate_y(PI),
matmul(
scale(self.scale.x, self.scale.y, self.scale.z),
matmul(
translate(self.translate.x, self.translate.y, self.translate.z),
ctx.camera,
),
),
);
let mv_matrix = matmul(v_matrix, m_matrix);
gl.uniform_matrix_4fv(mv_location, false, &mv_matrix);
let sampler_location = gl.get_uniform_location(ctx.program, "uSampler");
gl.uniform_1i(sampler_location, self.cur_texture as i32);
// Lighting properties
let ambient_location = gl.get_uniform_location(ctx.program, "uAmbientProduct");
let diffuse_location = gl.get_uniform_location(ctx.program, "uDiffuseProduct");
let specular_location = gl.get_uniform_location(ctx.program, "uSpecularProduct");
// Light position
let shininess_location = gl.get_uniform_location(ctx.program, "uShininess");
gl.uniform_4f(ambient_location, 0.8, 0.8, 0.8, 1.0);
gl.uniform_4f(diffuse_location, 0.75164, 0.60648, 0.22648, 1.0);
gl.uniform_4f(specular_location, 0.628281, 0.555802, 0.366065, 1.0);
gl.uniform_1f(shininess_location, 0.4 * 128.0);
gl.draw_arrays(gl::TRIANGLES, self.vert_start / 8, self.num_verts);
}
}
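// Typical call order for a Drawable (sketch; `ctx` is assumed to be a valid
// `Context` with a compiled program and camera, and `next_free_offset` is a
// hypothetical running offset into a shared vertex buffer):
//
// let data = obj.buffer_data(next_free_offset); // fill the shared VBO once
// obj.load_texture(&ctx); // one-time texture upload
// obj.draw(&ctx); // per frame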
limit.rs | //! Data structures to help perform rate limiting.
use std::collections::{HashMap, VecDeque};
use std::cmp;
use std::fmt::Debug;
use std::io::{self, Read, Write, ErrorKind};
use std::result::Result;
use bytes::{BytesMut, Buf, BufMut};
use crate::util::RorW;
use self::Status::*;
/// Generic buffer for rate-limiting, both reading and writing.
#[derive(Debug)]
pub struct RLBuf {
/// Buffer to help determine demand, for rate-limiting.
buf: BytesMut,
/// Index into `buf`, of the first data not allowed to be used. Everything
/// before it will be used upon request.
///
/// "Used" means `read` by a higher layer, or `write` by a lower layer.
allowance: usize,
/// Amount of data read out since last call to `reset_usage`.
last_used: usize,
}
impl RLBuf {
/** Create a new `RLBuf` with the given lower bound on the initial capacity.
The actual capacity can be got later with `get_demand_cap`.
*/
pub fn new_lb(init: usize) -> RLBuf {
RLBuf {
buf: BytesMut::with_capacity(init),
allowance: 0,
last_used: 0,
}
}
/** Get the current demand.
For higher-level rate-limiting logic, to determine how to rate-limit.
*/
pub fn get_demand(&self) -> usize {
self.buf.len()
}
/** Get the current buffer capacity, i.e. allocated memory.
For higher-level rate-limiting logic, to monitor resource usage, to help it
analyse how efficient it is.
*/
pub fn get_demand_cap(&self) -> usize {
self.buf.capacity()
}
pub fn get_demand_remaining(&self) -> usize {
self.buf.capacity() - self.buf.len()
}
/** Add the allowance, which must not be greater than the demand.
For higher-level rate-limiting logic, as it performs the rate-limiting.
*/
pub fn add_allowance(&mut self, allowance: usize) {
if self.allowance + allowance > self.get_demand() {
panic!("allowance > demand");
}
self.allowance += allowance
}
/** Return the latest usage figures & reset them back to zero.
The first number is the number of allowed bytes that were unused.
The second number is the number of allowed bytes that were used.
For higher-level rate-limiting logic, before rate-limiting is performed, to
detect consumers that consumed even more slowly than the rate limit in the
previous cycle. In response to this, the higher-level logic should give less
allowance for this consumer, to avoid waste.
*/
pub fn reset_usage(&mut self) -> (usize, usize) {
let wasted = self.allowance;
let used = self.last_used;
self.allowance = 0;
self.last_used = 0;
(wasted, used)
}
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn consume_read(&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
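// Intended cycle (sketch): a higher-level limiter polls `get_demand()`,
// grants part of it with `add_allowance(n)`, the I/O side consumes it via
// `consume_read`/`consume_write`, and `reset_usage()` then reports
// (wasted, used) so the next grant can be tuned. See the `rlbuf_cycle`
// test below for a concrete run.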
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf,
pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
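// Event-loop sketch (hypothetical driver code, not part of this module;
// `limiter.grant` stands in for whatever external policy hands out bytes):
//
// rl.pre_read(); // fill rbuf from the socket
// let grant = limiter.grant(rl.rbuf.get_demand());
// rl.rbuf.add_allowance(grant);
// let n = rl.read(&mut buf)?; // consume up to the allowance
// ...
// rl.write(&payload)?; // queue into wbuf
// rl.wbuf.add_allowance(grant_w);
// rl.post_write(); // drain wbuf to the socket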
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
}
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
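// One accounting round (sketch; `rl` is assumed to be a `RateLimited` handle):
//
// let mut stats = UsageStats::new();
// stats.add_current_usage(rl.rbuf.reset_usage()); // (wasted, used)
// stats.finalise_current_usage();
// let next = stats.estimate_next_usage(); // currently: last round's `used`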
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// TODO: actually perform rate-limiting. the current code ought not
// to be (but is) much slower than the async-io version, however
// this only noticeable on localhost-localhost transfers.
demand
}
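// A possible shape for the TODO above (hypothetical policy, not the one the
// project settled on): clamp every consumer's demand to an equal share of a
// global byte budget per tick.
//
// pub fn derive_allowance_capped<K: Eq + std::hash::Hash>(
// demand: HashMap<K, usize>,
// budget: usize,
// ) -> HashMap<K, usize> {
// let share = budget / demand.len().max(1);
// demand.into_iter().map(|(k, d)| (k, d.min(share))).collect()
// }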
#[cfg(test)]
mod tests {
use std::fs::*;
use std::fmt::Debug;
use std::io;
use std::io::*;
use std::assert;
use crate::sys::*;
use crate::util::*;
use super::*;
fn assert_would_block<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => assert_eq!(e.kind(), ErrorKind::WouldBlock),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_error<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => match e.kind() {
ErrorKind::WouldBlock => assert!(false),
ErrorKind::Interrupted => assert!(false),
_ => (),
},
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_num_bytes(res: io::Result<usize>, s: usize) {
match res {
Ok(n) => assert_eq!(n, s),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
// TODO: /dev/null etc is not a RawSocket in windows
#[test]
fn read_eof_ok() -> io::Result<()> {
let file = File::open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(RO(file), 1);
let mut buf = [0].repeat(1);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_num_bytes(bf.read(&mut buf), 0); // eof
Ok(())
}
#[test]
fn read_zero_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let unsafe_f = unsafe { File::from_raw_source(file.as_raw_source()) };
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(RO(file), sd);
assert_eq!(sd, bf.rbuf.get_demand_cap());
assert_eq!(0, bf.rbuf.get_demand());
let mut buf = [0].repeat(sx);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_eq!(sd, bf.rbuf.get_demand());
assert_would_block(bf.read(&mut buf));
bf.rbuf.add_allowance(sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx, bf.rbuf.get_demand());
bf.rbuf.add_allowance(sx + sy);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(bf.rbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert_would_block(bf.read(&mut buf));
assert_eq!(bf.rbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SOpen, bf.rstatus);
drop(unsafe_f); // close f, to force an error on the underlying stream
bf.pre_read();
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SErr, bf.rstatus);
bf.rbuf.add_allowance(sd - sx - sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert!(sd - sx - sx - sx <= sx); // otherwise next step fails
assert_num_bytes(bf.read(&mut buf), sd - sx - sx - sx);
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
Ok(())
}
#[test]
fn write_eof_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(WO(file), 1);
let buf = [0].repeat(1);
assert_num_bytes(bf.write(&buf), 1);
bf.post_write();
assert_eq!(bf.wstatus, SOpen);
bf.wbuf.add_allowance(1);
bf.post_write();
assert_eq!(bf.wstatus, SErr);
assert_error(bf.flush());
assert_error(bf.flush());
assert_error(bf.flush());
Ok(())
}
#[test]
fn write_null_ok() -> io::Result<()> {
let file = OpenOptions::new().write(true).open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(WO(file), sd);
assert_eq!(sd, bf.wbuf.get_demand_cap());
assert_eq!(sd, bf.wbuf.get_demand_remaining());
assert_eq!(0, bf.wbuf.get_demand());
let buf = [0].repeat(sd + sx);
bf.flush()?;
assert_num_bytes(bf.write(&buf), sd);
assert_eq!(sd, bf.wbuf.get_demand());
assert_would_block(bf.write(&buf[sd..]));
bf.wbuf.add_allowance(sx);
bf.post_write();
assert_eq!(sd - sx, bf.wbuf.get_demand());
bf.wbuf.add_allowance(sx + sy);
bf.post_write_exact(sx);
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(bf.wbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert!(bf.post_write_exact(0).is_none());
assert_eq!(bf.wbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
assert_num_bytes(bf.write(&buf), sx + sx);
assert_eq!(sd, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
bf.wbuf.add_allowance(sd);
assert_would_block(bf.flush());
assert_would_block(bf.flush());
assert_would_block(bf.flush());
bf.post_write();
assert_eq!(0, bf.wbuf.get_demand());
bf.flush()
}
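// Added example (not from the original suite): a minimal pass through the
// RLBuf demand -> allowance -> usage cycle documented above.
#[test]
fn rlbuf_cycle() {
let mut rl = RLBuf::new_lb(8);
rl.record_demand(b"hello");
assert_eq!(5, rl.get_demand());
rl.add_allowance(3);
let mut out = [0u8; 5];
// Only the allowed 3 of the 5 demanded bytes may be consumed
assert_eq!(3, rl.consume_read(&mut out));
assert_eq!(&out[..3], &b"hel"[..]);
// Nothing wasted, 3 bytes used since the last reset
assert_eq!((0, 3), rl.reset_usage());
assert_eq!(2, rl.get_demand());
}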
}
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes an `application/x-www-form-urlencoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes an `application/x-www-form-urlencoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * The main `deserialize` method defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map and key k; rest should be the remainder of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}]? Ordering not clear.
if !next_rest.is_empty() {
map.insert(k, Level::Invalid("unindexed nested structs are unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
// Start the sequence with this first value.
vm.insert(Level::Sequence(vec![v]));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]... => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v);
}
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
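// Added illustration (not part of the original crate): a quick check of
// how `split` carves a bracketed key into its head and tail.
#[test]
fn split_borrowed_example() {
let (head, tail) = split(Cow::Borrowed("b][c]"), ']');
assert_eq!(head, "b");
assert_eq!(tail, "[c]");
}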
impl<'a> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
| forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| // visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
| identifier_body |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes an `application/x-www-form-urlencoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes an `application/x-www-form-urlencoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * The main `deserialize` method defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map and key k; rest should be the remainder of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}]? Ordering not clear.
if !next_rest.is_empty() {
map.insert(k, Level::Invalid("unindexed nested structs are unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
// Start the sequence with this first value.
vm.insert(Level::Sequence(vec![v]));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
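// Worked example of `parse` (explanatory comment, added): for the input
// "a[b][c]=v", `new` calls parse(map, "a", "b][c]", "v"); the recursion
// then calls parse(inner, "b", "c]", "v") and finally
// parse(inner2, "c", "", "v"), which stores Level::Flat("v") under "c".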
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]... => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v);
}
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor |
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
} | {
self.deserialize_map(visitor)
} | random_line_split |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes an `application/x-www-form-urlencoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes an `application/x-www-form-urlencoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * The main `deserialize` method defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map and key k; rest should be the remainder of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}]? Ordering not clear.
if !next_rest.is_empty() {
map.insert(k, Level::Invalid("unindexed nested structs are unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
// Start the sequence with this first value.
vm.insert(Level::Sequence(vec![v]));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]... => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v);
}
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
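// Hedged usage sketch (added; the struct and field names are invented
// for illustration): with serde derive, a query string such as
// "user[name]=ferris&user[langs][]=rust&user[langs][]=c" would fill a
// nested struct like `User { name: String, langs: Vec<String> }` keyed
// under "user".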
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn de | >(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| serialize_struct<V | identifier_name |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes an `application/x-www-form-urlencoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes an `application/x-www-form-urlencoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * The main `deserialize` method defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map and key k; rest should be the remainder of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}]? Ordering not clear.
if !next_rest.is_empty() {
map.insert(k, Level::Invalid("unindexed nested structs are unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
// Start the sequence with this first value.
vm.insert(Level::Sequence(vec![v]));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]... => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v);
}
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
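// Note on the visitor protocol above (explanatory comment, added):
// `visit_key_seed` only peeks so the entry stays available for the
// matching `visit_value_seed`, which then consumes it via `next`.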
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
| lse {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| Deserializer::with_map(x).deserialize_map(visitor)
} e | conditional_block |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`BasicWitnessTerm`]: crate::chase::r#impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
}
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
} |
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
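// Illustrative note (added): if the Equality instance holds the tuples
// (e1, e2) and (e2, e3), the rewrite built above normalizes e1, e2 and
// e3 to one representative, which `rewrite_model` then renumbers into a
// dense range starting at E(0).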
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
}
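// Minimal usage sketch (added; `record` is pub(super), so this only
// applies within the parent module, and the names are illustrative):
// let mut model = RelModel::new(&signature);
// let e = model.record(BasicWitnessTerm::Const(c)); // denote constant c
// let model = model.finalize(); // collapse equalities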
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
}
}
}
result
}
fn witness(&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.into_iter()
.sorted()
.map(|e| {
let witnesses: Vec<String> =
self.witness(&e).iter().map(|w| w.to_string()).collect();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary mapping each symbol of the signature to the
// relation used to access its instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
} | _ => {}
} | random_line_split |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`BasicWitnessTerm`]: crate::chase::r#impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E |
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
}
_ => {}
}
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
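// Note (explanatory comment, added): every equality tuple (a, b) is
// mirrored as (b, a) above, so the Equality relation stays symmetric
// without a separate closure pass.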
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
}
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
}
}
}
result
}
fn witness(&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.into_iter()
.sorted()
.map(|e| {
let witnesses: Vec<String> =
self.witness(&e).iter().map(|w| w.to_string()).collect();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary mapping each symbol of the signature to the
// relation used to access its instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
}
| {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
} | identifier_body |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`BasicWitnessTerm`]: crate::chase::r#impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
}
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
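    // Illustrative sketch (not part of the original source): `record` is
    // idempotent for a flat witness term; a second call finds the element in
    // `rewrites` instead of minting a new one. Assumes `model: RelModel` and
    // a constant witness `c: BasicWitnessTerm` are in scope.
    //
    // let e1 = model.record(c.clone());
    // let e2 = model.record(c.clone());
    // assert_eq!(e1, e2);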
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
            Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
}
_ => {}
}
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
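    // Illustrative sketch (not part of the original source): for
    // `Symbol::Equality` the tuples are symmetrized before insertion, so one
    // call records both directions. Assumes elements `a` and `b` are in scope.
    //
    // model.insert(&Symbol::Equality, vec![vec![a, b]].into())?;
    // // the database now also contains the mirrored tuple [b, a]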
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
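    // Worked example (added for illustration): if the recorded equations
    // identify E(0) with E(2) in a domain {E(0), E(1), E(2)}, the normal
    // forms are {E(0), E(1)}; `conversion_map` renames them densely (E(0)
    // and E(2) map to E(0), E(1) maps to E(1)), and every witness term and
    // database tuple is rebuilt under that renaming.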
}
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let observations = Vec::new();
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
result.extend(observations);
}
}
}
result
}
fn | (&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.iter()
.sorted()
.iter()
.map(|e| {
let witnesses: Vec<String> =
self.witness(e).iter().map(|w| w.to_string()).collect();
let witnesses = witnesses.into_iter().sorted();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary that maps the symbols of a signature to their
// corresponding relations, used to access their instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
}
| witness | identifier_name |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh | else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
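// Worked example (added for illustration): with `offset = 3` and
// `bytes = 8`, the guard `bytes * 2 > offset` holds and
// `rest = 8 * 2 - 3 - 1 = 12`, so the marker line rendered above the hex
// dump is three dots, one `┌`, and twelve `─` characters.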
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} | conditional_block |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
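// Illustrative note (not part of the original source): `#step_count#` is a
// plain textual placeholder. With `pcx.step_count == 7` the emitted script
// contains `if(res != 7)`, so the page reloads as soon as the server
// reports a different step count.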
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
} | div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
} | }
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
} | random_line_split |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
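// Illustrative sketch (not part of the original source): because
// `FlashString` implements `FromRequest`, a handler can take it as a plain
// argument; a request without a flash message yields an empty string.
//
// #[get("/example")]
// fn example_sketch(flash: FlashString) -> String {
//     flash.0
// }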
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn fram | me: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| e_invalid(fra | identifier_name |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> | Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap(); | identifier_body |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
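// Illustrative sketch (not part of the original source): `update` hands the
// chosen layer's stack to a closure, so reordering within a layer is a
// single call. This mirrors the raise-to-front step in `focus_window` below.
#[allow(dead_code)]
fn raise_to_front_sketch(layers: &mut WindowLayers, window: Rc<Window>) {
    layers.update(window.layer, |windows| {
        windows.retain(|w| *w != window);
        windows.push(window.clone());
    });
}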
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn windows_to_render(&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// If the window have keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat()) | // it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
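// Illustrative sketch (not part of the original source): moving focus to
// the next focusable window can be composed from the public API above.
// Assumes `Window::can_receive_focus` is callable from this module.
#[allow(dead_code)]
fn focus_next_window_sketch(window_manager: &WindowManager) {
    let next = window_manager
        .windows()
        .find(|w| w.can_receive_focus() && !window_manager.window_has_focus(w));
    if let Some(window) = next {
        window_manager.focus_window(window);
    }
}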
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with an output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose if it wants to focus the
// window
if window.can_receive_focus() {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
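// Illustrative usage (not part of the original source): after wiring an
// output manager, windows are created per layer; a surface that cannot
// receive focus is pushed to the top of its layer instead of the back.
//
// window_manager.set_output_manager(output_manager.clone());
// let window = window_manager.new_window(WindowLayer::Background, surface);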
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
} | .keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know | random_line_split |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
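// Illustrative note (not part of the original source): the variant order
// doubles as stacking order. `WindowLayers::all_windows` below chains the
// layers from Background to Overlay, and that list runs back to front, so
// Overlay windows end up on top.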
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn windows_to_render(&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// If the window have keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
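// Illustrative sketch (not part of the original source): `blur` only clears
// keyboard focus; handing focus back is a separate `focus_window` call.
// Assumes `wm: &WindowManager` is in scope.
//
// wm.blur();
// if let Some(window) = wm.windows().rev().find(|w| w.can_receive_focus()) {
//     wm.focus_window(window);
// }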
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with an output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose if it wants to focus the
// window
if window.can_receive_focus() | else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
}
| {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} | conditional_block |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
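// Illustrative note (not part of the original source): `all_windows` yields
// windows back-to-front (background first, overlay last). That ordering is
// why the hit-testing methods on `WindowManager` below call `.rev()` before
// `find`, so the frontmost window under a point wins.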
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn | (&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// Whether the window has keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with an output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose whether it wants to focus the
// window
if window.can_receive_focus() {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
}
| windows_to_render | identifier_name |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment; we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
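// Illustrative usage sketch (not part of the original source): resolving a
// hardcoded testnet by name with a fallback to the compiled-in default. The
// helper name and error strings here are assumptions for demonstration only.
fn _resolve_testnet(name: Option<&str>) -> Result<Eth2TestnetConfig, String> {
match name {
Some(n) => Eth2TestnetConfig::constant(n)?
.ok_or_else(|| format!("unknown hardcoded testnet: {}", n)),
None => Eth2TestnetConfig::hard_coded_default()?
.ok_or_else(|| "no default testnet available".to_string()),
}
}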
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
/// Attempts to deserialize the genesis `BeaconState`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
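// Illustrative note (not part of the original source): some serde_yaml
// versions emit a `---\n` document header; the `&yaml[4..]` slice above drops
// exactly those four bytes so that single-value files (e.g. the deploy block
// number) stay plain text for clients that don't expect YAML.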
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" | else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} | conditional_block |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment; we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
| .as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
} | /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes | random_line_split |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment; we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
/// Attempts to deserialize the genesis `BeaconState`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn | (&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| write_to_file | identifier_name |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment; we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool |
/// Attempts to deserialize the genesis `BeaconState`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| {
self.genesis_state_bytes.is_some()
} | identifier_body |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is the basic type of Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some potion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
/// Creates an instance from an *analog* signal (which can only be `-1`, `0` or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
/// Zero converts to None
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
/// Converts to a value within the interval \[`-1.0`; `1.0`\]
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
/// Returns the sign (`1` or `-1`) of the internal value if it exists and is not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
/// Returns the sign of the internal value if it exists
///
/// Otherwise returns None
#[must_use]
pub fn | (self) -> Option<i8> {
self.into()
}
/// Returns the internal representation of the value if the signal exists, or `None` if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
/// Checks if there is signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
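// Illustrative sketch (not part of the original source): the different views
// of an `Action` described in the type-level docs above. The function name is
// an assumption for demonstration only.
fn _action_views_demo() {
let buy = Action::Buy(BOUND / 2);
assert_eq!(buy.analog(), 1); // direction only: `1`, `-1` or `0`
assert!(buy.ratio().unwrap() > 0.0); // proportional value in [-1.0; 1.0]
assert_eq!(Action::None.analog(), 0); // no signal collapses to `0`...
assert_eq!(Action::None.ratio(), None); // ...or to `None` as a ratio
}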
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
impl From<bool> for Action {
fn from(value: bool) -> Self {
if value {
Self::BUY_ALL
} else {
Self::None
}
}
}
impl From<i8> for Action {
fn from(value: i8) -> Self {
match value {
0 => Self::None,
v => {
if v > 0 {
Self::BUY_ALL
} else {
Self::SELL_ALL
}
}
}
}
}
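// Illustrative sketch (not part of the original source): any non-zero analog
// value saturates to a full signal, as described in the conversion above. The
// function name is an assumption for demonstration only.
fn _action_from_analog_demo() {
assert_eq!(Action::from_analog(5), Action::BUY_ALL);
assert_eq!(Action::from_analog(-1), Action::SELL_ALL);
assert_eq!(Action::from_analog(0), Action::None);
}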
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self {
match value {
None => Self::None,
Some(v) => v.into(),
}
}
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
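// Worked example (illustrative, not part of the original source): with the
// 8-bit `SignalType` above, `BOUND_FLOAT` is 255.0, so an input of 0.5 maps to
// (0.5 * 255.0).round() = 128. This matches the `BOUND / 2 + 1` half-way value
// the tests below use when `BOUND` is odd.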
impl From<f64> for Action {
fn from(v: f64) -> Self {
if v.is_nan() {
return Self::None;
}
let normalized = v.max(-1.0).min(1.0);
let value = from_normalized_f64_to_bounded(normalized.abs());
if normalized.is_sign_negative() {
if value == BOUND {
Self::SELL_ALL
} else {
Self::Sell(value)
}
} else if value == BOUND {
Self::BUY_ALL
} else {
Self::Buy(value)
}
}
}
impl From<Option<f64>> for Action {
fn from(value: Option<f64>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<f32> for Action {
#[allow(clippy::cast_possible_truncation)]
fn from(v: f32) -> Self {
Self::from(v as f64)
}
}
impl From<Option<f32>> for Action {
fn from(value: Option<f32>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
impl Neg for Action {
type Output = Self;
fn neg(self) -> Self::Output {
match self {
Self::None => Self::None,
Self::Buy(value) => Self::Sell(value),
Self::Sell(value) => Self::Buy(value),
}
}
}
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
(s, Self::None) => s,
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
(s1, s2) => s1 - (-s2),
}
}
}
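// Worked examples of the subtraction rules above (an illustrative sketch, not
// in the original source). Same-direction signals subtract magnitudes and flip
// direction on underflow; a `None` operand acts as zero; mixed directions are
// reduced by negating the right operand, so `Buy(a) - Sell(b)` evaluates as
// `Buy(a) - Buy(b)`.
#[cfg(test)]
mod sub_sketch {
    use super::Action;

    #[test]
    fn subtraction_rules() {
        assert_eq!(Action::Buy(10) - Action::Buy(4), Action::Buy(6));
        assert_eq!(Action::Buy(4) - Action::Buy(10), Action::Sell(6));
        assert_eq!(Action::None - Action::Sell(2), Action::Buy(2));
        // Mixed directions: Buy(10) - Sell(5) reduces to Buy(10) - Buy(5).
        assert_eq!(Action::Buy(10) - Action::Sell(5), Action::Buy(5));
    }
}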
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(value) => write!(f, "+{}", value),
Self::Sell(value) => write!(f, "-{}", value),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
}
| sign | identifier_name |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is the basic type of an Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some portion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
    /// Creates an instance from an *analog* signal (which can only be `-1`, `0`, or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
    /// Zero converts to `None`
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
    /// Converts the value to a ratio within the interval \[`-1.0`; `1.0`\]
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
    /// Returns the sign (`1` or `-1`) of the internal value if the value exists and is not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
    /// Returns the sign of the internal value if the value exists
    ///
    /// Otherwise returns `None`
#[must_use]
pub fn sign(self) -> Option<i8> {
self.into()
}
    /// Returns the internal representation of the value if the signal exists, or `None` if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
    /// Checks if there is a signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
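// Quick sketch of the accessor methods above (an illustrative addition, not in
// the original source): `analog` collapses the signal to a bare sign, `sign`
// is its `Option`-returning counterpart, and `value` exposes the raw internal
// magnitude.
#[cfg(test)]
mod accessor_sketch {
    use super::Action;

    #[test]
    fn accessors() {
        let a = Action::Sell(10);
        assert_eq!(a.analog(), -1);
        assert_eq!(a.sign(), Some(-1));
        assert_eq!(a.value(), Some(10));
        assert_eq!(Action::None.analog(), 0);
        assert_eq!(Action::None.sign(), None);
        assert!(Action::None.is_none());
    }
}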
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
impl From<bool> for Action {
fn from(value: bool) -> Self {
if value {
Self::BUY_ALL
} else {
Self::None
}
}
}
impl From<i8> for Action {
fn from(value: i8) -> Self {
match value {
0 => Self::None,
v => {
if v > 0 {
Self::BUY_ALL
} else {
Self::SELL_ALL
}
}
}
}
}
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
| Some(v) => v.into(),
}
}
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
impl From<f64> for Action {
fn from(v: f64) -> Self {
if v.is_nan() {
return Self::None;
}
let normalized = v.max(-1.0).min(1.0);
let value = from_normalized_f64_to_bounded(normalized.abs());
if normalized.is_sign_negative() {
if value == BOUND {
Self::SELL_ALL
} else {
Self::Sell(value)
}
} else if value == BOUND {
Self::BUY_ALL
} else {
Self::Buy(value)
}
}
}
impl From<Option<f64>> for Action {
fn from(value: Option<f64>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<f32> for Action {
#[allow(clippy::cast_possible_truncation)]
fn from(v: f32) -> Self {
Self::from(v as f64)
}
}
impl From<Option<f32>> for Action {
fn from(value: Option<f32>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
impl Neg for Action {
type Output = Self;
fn neg(self) -> Self::Output {
match self {
Self::None => Self::None,
Self::Buy(value) => Self::Sell(value),
Self::Sell(value) => Self::Buy(value),
}
}
}
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
(s, Self::None) => s,
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
(s1, s2) => s1 - (-s2),
}
}
}
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(value) => write!(f, "+{}", value),
Self::Sell(value) => write!(f, "-{}", value),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
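// Formatting sketch (an illustrative addition, not in the original source):
// `Debug` prints the raw internal magnitude, while `Display` prints the ratio
// rounded to two decimals.
#[cfg(test)]
mod fmt_sketch {
    use super::{Action, BOUND};

    #[test]
    fn debug_and_display() {
        assert_eq!(format!("{:?}", Action::Buy(BOUND)), format!("+{}", BOUND));
        assert_eq!(format!("{:?}", Action::None), "N");
        assert_eq!(format!("{}", Action::Buy(BOUND)), "+1.00");
        assert_eq!(format!("{}", Action::Sell(BOUND)), "-1.00");
    }
}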
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
} | impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self {
match value {
None => Self::None, | random_line_split |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is the basic type of an Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some portion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
    /// Creates an instance from an *analog* signal (which can only be `-1`, `0`, or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
    /// Zero converts to `None`
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
    /// Converts the value to a ratio within the interval \[`-1.0`; `1.0`\]
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
    /// Returns the sign (`1` or `-1`) of the internal value if the value exists and is not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
    /// Returns the sign of the internal value if the value exists
    ///
    /// Otherwise returns `None`
#[must_use]
pub fn sign(self) -> Option<i8> {
self.into()
}
    /// Returns the internal representation of the value if the signal exists, or `None` if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
    /// Checks if there is a signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
impl From<bool> for Action {
fn from(value: bool) -> Self {
if value {
Self::BUY_ALL
} else {
Self::None
}
}
}
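// Conversion sketch (an illustrative addition, not in the original source):
// the `From` impls in this file let `bool`, `i8`, and floats all funnel into
// `Action`, with `NaN` treated as the absence of a signal.
#[cfg(test)]
mod conversion_sketch {
    use super::Action;

    #[test]
    fn conversions() {
        assert_eq!(Action::from(true), Action::BUY_ALL);
        assert_eq!(Action::from(false), Action::None);
        assert_eq!(Action::from(-3_i8), Action::SELL_ALL);
        assert_eq!(Action::from(std::f64::NAN), Action::None);
    }
}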
impl From<i8> for Action {
fn from(value: i8) -> Self {
match value {
0 => Self::None,
v => {
if v > 0 {
Self::BUY_ALL
} else {
Self::SELL_ALL
}
}
}
}
}
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self |
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
impl From<f64> for Action {
fn from(v: f64) -> Self {
if v.is_nan() {
return Self::None;
}
let normalized = v.max(-1.0).min(1.0);
let value = from_normalized_f64_to_bounded(normalized.abs());
if normalized.is_sign_negative() {
if value == BOUND {
Self::SELL_ALL
} else {
Self::Sell(value)
}
} else if value == BOUND {
Self::BUY_ALL
} else {
Self::Buy(value)
}
}
}
impl From<Option<f64>> for Action {
fn from(value: Option<f64>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<f32> for Action {
#[allow(clippy::cast_possible_truncation)]
fn from(v: f32) -> Self {
Self::from(v as f64)
}
}
impl From<Option<f32>> for Action {
fn from(value: Option<f32>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
impl Neg for Action {
type Output = Self;
fn neg(self) -> Self::Output {
match self {
Self::None => Self::None,
Self::Buy(value) => Self::Sell(value),
Self::Sell(value) => Self::Buy(value),
}
}
}
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
(s, Self::None) => s,
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
(s1, s2) => s1 - (-s2),
}
}
}
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(value) => write!(f, "+{}", value),
Self::Sell(value) => write!(f, "-{}", value),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
}
| {
match value {
None => Self::None,
Some(v) => v.into(),
}
} | identifier_body |
cli.rs | use std::process;
use std::str::FromStr;
use std::string::ToString;
use console::Term;
use structopt::StructOpt;
use crate::bat::assets::HighlightingAssets;
use crate::bat::output::PagingMode;
use crate::config;
use crate::style;
#[derive(StructOpt, Clone, Debug)]
#[structopt(
name = "delta",
about = "A syntax-highlighter for git and diff output",
after_help = "\
Colors
------
All delta color options work the same way. There are two ways to specify a color:
1. RGB hex code
An example of passing an RGB hex code is:
--file-color=\"#0e7c0e\"
2. ANSI color name
There are 8 ANSI color names:
black, red, green, yellow, blue, magenta, cyan, white.
In addition, all of them have a bright form:
bright-black, bright-red, bright-green, bright-yellow, bright-blue, bright-magenta, bright-cyan, bright-white
An example is:
--file-color=\"green\"
Unlike RGB hex codes, ANSI color names are just names: you can choose the exact color that each
name corresponds to in the settings of your terminal application (the application you use to run
command line programs). This means that if you use ANSI color names, and you change the color
theme used by your terminal, then delta's colors will respond automatically, without needing to
change the delta command line.
\"purple\" is accepted as a synonym for \"magenta\". Color names and codes are case-insensitive.
"
)]
pub struct Opt {
/// Use default colors appropriate for a light terminal background. For more control, see the other
/// color options.
#[structopt(long = "light")]
pub light: bool,
/// Use default colors appropriate for a dark terminal background. For more control, see the
/// other color options.
#[structopt(long = "dark")]
pub dark: bool,
#[structopt(long = "minus-color")]
/// The background color to use for removed lines.
pub minus_color: Option<String>,
#[structopt(long = "minus-emph-color")]
/// The background color to use for emphasized sections of removed lines.
pub minus_emph_color: Option<String>,
#[structopt(long = "plus-color")]
/// The background color to use for added lines.
pub plus_color: Option<String>,
#[structopt(long = "plus-emph-color")]
/// The background color to use for emphasized sections of added lines.
pub plus_emph_color: Option<String>,
#[structopt(long = "theme", env = "BAT_THEME")]
/// The code syntax highlighting theme to use. Use --theme=none to disable syntax highlighting.
/// If the theme is not set using this option, it will be taken from the BAT_THEME environment
/// variable, if that contains a valid theme name. Use --list-themes and --compare-themes to
/// view available themes. Note that the choice of theme only affects code syntax highlighting.
/// See --commit-color, --file-color, --hunk-color to configure the colors of other parts of
/// the diff output.
pub theme: Option<String>,
#[structopt(long = "highlight-removed")]
/// Apply syntax highlighting to removed lines. The default is to
/// apply syntax highlighting to unchanged and new lines only.
pub highlight_removed: bool,
#[structopt(long = "commit-style", default_value = "plain")]
/// Formatting style for the commit section of git output. Options
/// are: plain, box.
pub commit_style: SectionStyle,
#[structopt(long = "commit-color", default_value = "yellow")]
/// Color for the commit section of git output.
pub commit_color: String,
#[structopt(long = "file-style", default_value = "underline")]
/// Formatting style for the file section of git output. Options
/// are: plain, box, underline.
pub file_style: SectionStyle,
#[structopt(long = "file-color", default_value = "blue")]
/// Color for the file section of git output.
pub file_color: String,
#[structopt(long = "hunk-style", default_value = "box")]
/// Formatting style for the hunk-marker section of git output. Options
/// are: plain, box.
pub hunk_style: SectionStyle,
#[structopt(long = "hunk-color", default_value = "blue")]
/// Color for the hunk-marker section of git output.
pub hunk_color: String,
/// The width (in characters) of the background color
/// highlighting. By default, the width is the current terminal
/// width. Use --width=variable to apply background colors to the
/// end of each line, without right padding to equal width.
#[structopt(short = "w", long = "width")]
pub width: Option<String>,
/// The number of spaces to replace tab characters with. Use --tabs=0 to pass tab characters
/// through directly, but note that in that case delta will calculate line widths assuming tabs
    /// occupy one character's width on the screen: if your terminal renders tabs as more than
/// one character wide then delta's output will look incorrect.
#[structopt(long = "tabs", default_value = "4")]
pub tab_width: usize,
/// Show the command-line arguments (RGB hex codes) for the background colors that are in
/// effect. The hex codes are displayed with their associated background color. This option can
/// be combined with --light and --dark to view the background colors for those modes. It can
/// also be used to experiment with different RGB hex codes by combining this option with
/// --minus-color, --minus-emph-color, --plus-color, --plus-emph-color.
#[structopt(long = "show-background-colors")]
pub show_background_colors: bool,
/// List supported languages and associated file extensions.
#[structopt(long = "list-languages")]
pub list_languages: bool,
/// List available syntax-highlighting color themes.
#[structopt(long = "list-theme-names")]
pub list_theme_names: bool,
/// List available syntax highlighting themes, each with an example of highlighted diff output.
/// If diff output is supplied on standard input then this will be used for the demo. For
/// example: `git show --color=always | delta --list-themes`.
#[structopt(long = "list-themes")]
pub list_themes: bool,
/// The maximum distance between two lines for them to be inferred to be homologous. Homologous
/// line pairs are highlighted according to the deletion and insertion operations transforming
/// one into the other.
#[structopt(long = "max-line-distance", default_value = "0.3")]
pub max_line_distance: f64,
/// Whether to use a pager when displaying output. Options are: auto, always, and never. The
/// default pager is `less`: this can be altered by setting the environment variables BAT_PAGER
/// or PAGER (BAT_PAGER has priority).
#[structopt(long = "paging", default_value = "auto")]
pub paging_mode: String,
}
#[derive(Clone, Debug, PartialEq)]
pub enum | {
Box,
Plain,
Underline,
}
// TODO: clean up enum parsing and error handling
#[derive(Debug)]
pub enum Error {
SectionStyleParseError,
}
impl FromStr for SectionStyle {
type Err = Error;
fn from_str(s: &str) -> Result<SectionStyle, Error> {
match s.to_lowercase().as_str() {
"box" => Ok(SectionStyle::Box),
"plain" => Ok(SectionStyle::Plain),
"underline" => Ok(SectionStyle::Underline),
_ => Err(Error::SectionStyleParseError),
}
}
}
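// Parsing sketch (an illustrative addition, not in the original source):
// `from_str` lowercases its input first, so structopt happily accepts
// mixed-case values such as `--file-style=Underline`.
#[cfg(test)]
mod section_style_sketch {
    use super::SectionStyle;

    #[test]
    fn parses_case_insensitively() {
        assert_eq!("BOX".parse::<SectionStyle>().unwrap(), SectionStyle::Box);
        assert_eq!("plain".parse::<SectionStyle>().unwrap(), SectionStyle::Plain);
        assert!("bogus".parse::<SectionStyle>().is_err());
    }
}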
impl ToString for Error {
fn to_string(&self) -> String {
"".to_string()
}
}
pub fn process_command_line_arguments<'a>(
assets: &'a HighlightingAssets,
opt: &'a Opt,
) -> config::Config<'a> {
if opt.light && opt.dark {
eprintln!("--light and --dark cannot be used together.");
process::exit(1);
}
match &opt.theme {
        Some(theme) if !style::is_no_syntax_highlighting_theme_name(&theme) => {
            if !assets.theme_set.themes.contains_key(theme.as_str()) {
eprintln!("Invalid theme: '{}'", theme);
process::exit(1);
}
let is_light_theme = style::is_light_theme(&theme);
if is_light_theme && opt.dark {
eprintln!(
"{} is a light theme, but you supplied --dark. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
            } else if !is_light_theme && opt.light {
eprintln!(
"{} is a dark theme, but you supplied --light. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
}
}
_ => (),
};
// We do not use the full width, in case `less --status-column` is in effect. See #41 and #10.
// TODO: There seems to be some confusion in the accounting: we are actually leaving 2
// characters unused for less at the right edge of the terminal, despite the subtraction of 1
// here.
let available_terminal_width = (Term::stdout().size().1 - 1) as usize;
let background_color_width = match opt.width.as_ref().map(String::as_str) {
Some("variable") => None,
Some(width) => Some(
width
.parse::<usize>()
.unwrap_or_else(|_| panic!("Invalid width: {}", width)),
),
None => Some(available_terminal_width),
};
let paging_mode = match opt.paging_mode.as_ref() {
"always" => PagingMode::Always,
"never" => PagingMode::Never,
"auto" => PagingMode::QuitIfOneScreen,
_ => {
eprintln!(
"Invalid paging value: {} (valid values are \"always\", \"never\", and \"auto\")",
opt.paging_mode
);
process::exit(1);
}
};
config::get_config(
opt,
&assets.syntax_set,
&assets.theme_set,
available_terminal_width,
background_color_width,
paging_mode,
)
}
| SectionStyle | identifier_name |
cli.rs | use std::process;
use std::str::FromStr;
use std::string::ToString;
use console::Term;
use structopt::StructOpt;
use crate::bat::assets::HighlightingAssets;
use crate::bat::output::PagingMode;
use crate::config;
use crate::style;
#[derive(StructOpt, Clone, Debug)]
#[structopt(
name = "delta",
about = "A syntax-highlighter for git and diff output",
after_help = "\
Colors
------
All delta color options work the same way. There are two ways to specify a color:
1. RGB hex code
An example of passing an RGB hex code is:
--file-color=\"#0e7c0e\"
2. ANSI color name
There are 8 ANSI color names:
black, red, green, yellow, blue, magenta, cyan, white.
In addition, all of them have a bright form:
bright-black, bright-red, bright-green, bright-yellow, bright-blue, bright-magenta, bright-cyan, bright-white
An example is:
--file-color=\"green\"
Unlike RGB hex codes, ANSI color names are just names: you can choose the exact color that each
name corresponds to in the settings of your terminal application (the application you use to run
command line programs). This means that if you use ANSI color names, and you change the color
theme used by your terminal, then delta's colors will respond automatically, without needing to
change the delta command line.
\"purple\" is accepted as a synonym for \"magenta\". Color names and codes are case-insensitive.
"
)]
pub struct Opt {
/// Use default colors appropriate for a light terminal background. For more control, see the other
/// color options.
#[structopt(long = "light")]
pub light: bool,
/// Use default colors appropriate for a dark terminal background. For more control, see the
/// other color options.
#[structopt(long = "dark")]
pub dark: bool,
#[structopt(long = "minus-color")]
/// The background color to use for removed lines.
pub minus_color: Option<String>,
#[structopt(long = "minus-emph-color")]
/// The background color to use for emphasized sections of removed lines.
pub minus_emph_color: Option<String>,
#[structopt(long = "plus-color")]
/// The background color to use for added lines.
pub plus_color: Option<String>,
#[structopt(long = "plus-emph-color")]
/// The background color to use for emphasized sections of added lines.
pub plus_emph_color: Option<String>,
#[structopt(long = "theme", env = "BAT_THEME")]
/// The code syntax highlighting theme to use. Use --theme=none to disable syntax highlighting.
/// If the theme is not set using this option, it will be taken from the BAT_THEME environment
/// variable, if that contains a valid theme name. Use --list-themes and --compare-themes to
/// view available themes. Note that the choice of theme only affects code syntax highlighting.
/// See --commit-color, --file-color, --hunk-color to configure the colors of other parts of
/// the diff output.
pub theme: Option<String>,
#[structopt(long = "highlight-removed")]
/// Apply syntax highlighting to removed lines. The default is to
/// apply syntax highlighting to unchanged and new lines only.
pub highlight_removed: bool,
#[structopt(long = "commit-style", default_value = "plain")]
/// Formatting style for the commit section of git output. Options
/// are: plain, box.
pub commit_style: SectionStyle,
#[structopt(long = "commit-color", default_value = "yellow")]
/// Color for the commit section of git output.
pub commit_color: String,
#[structopt(long = "file-style", default_value = "underline")]
/// Formatting style for the file section of git output. Options
/// are: plain, box, underline.
pub file_style: SectionStyle,
#[structopt(long = "file-color", default_value = "blue")]
/// Color for the file section of git output.
pub file_color: String,
#[structopt(long = "hunk-style", default_value = "box")]
/// Formatting style for the hunk-marker section of git output. Options
/// are: plain, box.
pub hunk_style: SectionStyle,
#[structopt(long = "hunk-color", default_value = "blue")]
/// Color for the hunk-marker section of git output.
pub hunk_color: String,
/// The width (in characters) of the background color
/// highlighting. By default, the width is the current terminal
/// width. Use --width=variable to apply background colors to the
/// end of each line, without right padding to equal width.
#[structopt(short = "w", long = "width")]
pub width: Option<String>,
/// The number of spaces to replace tab characters with. Use --tabs=0 to pass tab characters
/// through directly, but note that in that case delta will calculate line widths assuming tabs
    /// occupy one character's width on the screen: if your terminal renders tabs as more than
/// one character wide then delta's output will look incorrect.
#[structopt(long = "tabs", default_value = "4")]
pub tab_width: usize,
| /// be combined with --light and --dark to view the background colors for those modes. It can
/// also be used to experiment with different RGB hex codes by combining this option with
/// --minus-color, --minus-emph-color, --plus-color, --plus-emph-color.
#[structopt(long = "show-background-colors")]
pub show_background_colors: bool,
/// List supported languages and associated file extensions.
#[structopt(long = "list-languages")]
pub list_languages: bool,
/// List available syntax-highlighting color themes.
#[structopt(long = "list-theme-names")]
pub list_theme_names: bool,
/// List available syntax highlighting themes, each with an example of highlighted diff output.
/// If diff output is supplied on standard input then this will be used for the demo. For
/// example: `git show --color=always | delta --list-themes`.
#[structopt(long = "list-themes")]
pub list_themes: bool,
/// The maximum distance between two lines for them to be inferred to be homologous. Homologous
/// line pairs are highlighted according to the deletion and insertion operations transforming
/// one into the other.
#[structopt(long = "max-line-distance", default_value = "0.3")]
pub max_line_distance: f64,
/// Whether to use a pager when displaying output. Options are: auto, always, and never. The
/// default pager is `less`: this can be altered by setting the environment variables BAT_PAGER
/// or PAGER (BAT_PAGER has priority).
#[structopt(long = "paging", default_value = "auto")]
pub paging_mode: String,
}
#[derive(Clone, Debug, PartialEq)]
pub enum SectionStyle {
Box,
Plain,
Underline,
}
// TODO: clean up enum parsing and error handling
#[derive(Debug)]
pub enum Error {
SectionStyleParseError,
}
impl FromStr for SectionStyle {
type Err = Error;
fn from_str(s: &str) -> Result<SectionStyle, Error> {
match s.to_lowercase().as_str() {
"box" => Ok(SectionStyle::Box),
"plain" => Ok(SectionStyle::Plain),
"underline" => Ok(SectionStyle::Underline),
_ => Err(Error::SectionStyleParseError),
}
}
}
impl ToString for Error {
fn to_string(&self) -> String {
"".to_string()
}
}
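// Width-handling sketch (an illustrative addition, not in the original
// source): mirrors how `process_command_line_arguments` below interprets the
// `--width` option; `parse_width` is a hypothetical helper name.
#[cfg(test)]
mod width_sketch {
    fn parse_width(arg: Option<&str>, terminal_width: usize) -> Option<usize> {
        match arg {
            // `--width=variable` disables right padding entirely.
            Some("variable") => None,
            // Any other value must parse as a column count.
            Some(w) => Some(w.parse::<usize>().expect("invalid width")),
            // Unset falls back to the detected terminal width.
            None => Some(terminal_width),
        }
    }

    #[test]
    fn width_rules() {
        assert_eq!(parse_width(Some("variable"), 80), None);
        assert_eq!(parse_width(Some("120"), 80), Some(120));
        assert_eq!(parse_width(None, 80), Some(80));
    }
}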
pub fn process_command_line_arguments<'a>(
assets: &'a HighlightingAssets,
opt: &'a Opt,
) -> config::Config<'a> {
if opt.light && opt.dark {
eprintln!("--light and --dark cannot be used together.");
process::exit(1);
}
match &opt.theme {
        Some(theme) if !style::is_no_syntax_highlighting_theme_name(&theme) => {
            if !assets.theme_set.themes.contains_key(theme.as_str()) {
eprintln!("Invalid theme: '{}'", theme);
process::exit(1);
}
let is_light_theme = style::is_light_theme(&theme);
if is_light_theme && opt.dark {
eprintln!(
"{} is a light theme, but you supplied --dark. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
            } else if !is_light_theme && opt.light {
eprintln!(
"{} is a dark theme, but you supplied --light. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
}
}
_ => (),
};
// We do not use the full width, in case `less --status-column` is in effect. See #41 and #10.
// TODO: There seems to be some confusion in the accounting: we are actually leaving 2
// characters unused for less at the right edge of the terminal, despite the subtraction of 1
// here.
let available_terminal_width = (Term::stdout().size().1 - 1) as usize;
let background_color_width = match opt.width.as_ref().map(String::as_str) {
Some("variable") => None,
Some(width) => Some(
width
.parse::<usize>()
.unwrap_or_else(|_| panic!("Invalid width: {}", width)),
),
None => Some(available_terminal_width),
};
let paging_mode = match opt.paging_mode.as_ref() {
"always" => PagingMode::Always,
"never" => PagingMode::Never,
"auto" => PagingMode::QuitIfOneScreen,
_ => {
eprintln!(
"Invalid paging value: {} (valid values are \"always\", \"never\", and \"auto\")",
opt.paging_mode
);
process::exit(1);
}
};
config::get_config(
opt,
&assets.syntax_set,
&assets.theme_set,
available_terminal_width,
background_color_width,
paging_mode,
)
} | /// Show the command-line arguments (RGB hex codes) for the background colors that are in
/// effect. The hex codes are displayed with their associated background color. This option can | random_line_split |
runtime.rs | //! An extension to start the tokio runtime at the appropriate time.
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use failure::Error;
use futures::future::{self, Future};
use log::{trace, warn};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use spirit::bodies::InnerBody;
use spirit::extension::{Extensible, Extension};
use spirit::{Builder, Spirit};
use structdoc::StructDoc;
use structopt::StructOpt;
use tokio::runtime;
/// A body run on tokio runtime.
///
/// When specifying a custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this
/// is the future to be run inside the runtime.
pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>;
/// An extension to initialize a tokio runtime as part of spirit.
///
/// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like
/// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify the configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take().unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added into the enum at any time. Such change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// // a smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum | {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
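// Construction sketch (an illustrative addition, not in the original source):
// a `ThreadPool` runtime with a tweaked builder. The boxed closure is stored,
// not run, until spirit starts the runtime; `core_threads` is a tokio 0.1
// `runtime::Builder` method.
#[cfg(test)]
mod runtime_sketch {
    use super::Runtime;

    #[test]
    fn construct_threadpool_variant() {
        let _runtime = Runtime::ThreadPool(Box::new(|builder| {
            builder.core_threads(2);
        }));
    }
}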
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
        C: DeserializeOwned + Send + Sync + 'static,
        O: StructOpt + Send + Sync + 'static,
{
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
    E::Config: DeserializeOwned + Send + Sync + 'static,
    E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than number of CPUs, but
/// it may make sense to set it lower.
///
/// If not set, the application will start with number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>,
/// Maximum number of blocking worker threads.
///
/// These do tasks that take longer time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
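// Configuration sketch (an illustrative addition, not in the original source):
// thanks to the serde attributes above (kebab-case keys, human-readable
// durations), a TOML fragment like the following deserializes into
// `ThreadPoolConfig`. The `[threadpool]` table name is hypothetical and
// depends on where the application embeds the struct.
//
// [threadpool]
// async-threads = 4
// blocking-threads = 16
// keep-alive = "30s"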
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
        F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
        O: Debug + StructOpt + Send + Sync + 'static,
        C: DeserializeOwned + Send + Sync + 'static,
{
Self::postprocess_extension(extract, |_: &mut _| ())
}
/// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking.
///
/// This allows to tweak the [threadpool builder][runtime::Builder] after it was pre-configured
/// by the configuration file. This might be desirable, for example, if the application also
/// wants to install an [`after_start`][runtime::Builder::after_start] or set the stack size
/// which either can't or don't make sense to configure by the user.
pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>>
where
        F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
        P: FnOnce(&mut runtime::Builder) + Send + 'static,
        O: Debug + StructOpt + Send + Sync + 'static,
        C: DeserializeOwned + Send + Sync + 'static,
{
let mut post = Some(post);
|mut builder: Builder<O, C>| {
assert!(
builder.singleton::<Runtime>(),
"Tokio Runtime already inserted"
);
trace!("Inserting configurable tokio runtime");
builder
.on_config({
let extract = extract.clone();
let mut first = None;
move |_: &O, cfg: &Arc<C>| {
let cfg = extract(cfg);
if first.is_none() {
first = Some(cfg);
                    } else if first.as_ref() != Some(&cfg) {
warn!("Tokio threadpool configuration can't be changed at runtime");
}
}
})
.run_around(|spirit, inner| {
Runtime::ThreadPool({
let spirit = Arc::clone(spirit);
Box::new(move |builder| {
let cfg = extract(&spirit.config());
if let Some(threads) = cfg.async_threads {
builder.core_threads(threads);
}
if let Some(threads) = cfg.blocking_threads {
builder.blocking_threads(threads);
}
if let Some(alive) = cfg.keep_alive {
builder.keep_alive(Some(alive));
}
(post.take().unwrap())(builder)
})
})
.execute(spirit, inner)
})
}
}
}
| Runtime | identifier_name |
runtime.rs | //! An extension to start the tokio runtime at the appropriate time.
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use failure::Error;
use futures::future::{self, Future};
use log::{trace, warn};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use spirit::bodies::InnerBody;
use spirit::extension::{Extensible, Extension};
use spirit::{Builder, Spirit};
use structdoc::StructDoc;
use structopt::StructOpt;
use tokio::runtime;
/// A body run on tokio runtime.
///
/// When specifying a custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this
/// is the future to be run inside the runtime.
pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>;
/// An extension to initialize a tokio runtime as part of spirit.
///
/// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like
/// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify the configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take().unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added to the enum at any time. Such a change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// //  a smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum Runtime {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
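// A minimal sketch (the thread count is illustrative) of registering a customized
// threadpool runtime via `with_singleton`, as described in the docs above:
//
//     Spirit::<Empty, Config>::new()
//         .with_singleton(Runtime::ThreadPool(Box::new(|builder| {
//             builder.core_threads(4);
//         })))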
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
C: DeserializeOwned + Send + Sync + 'static,
O: StructOpt + Send + Sync + 'static,
| runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
E::Config: DeserializeOwned + Send + Sync + 'static,
E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than the number of CPUs, but
/// it may make sense to set it lower.
///
/// If not set, the application will start with the number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>,
/// Maximum number of blocking worker threads.
///
/// These do tasks that take a long time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
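// A hypothetical configuration snippet this struct accepts (kebab-case keys; the
// `[threadpool]` section name comes from the embedding `Cfg` struct shown above):
//
//     [threadpool]
//     async-threads = 4
//     blocking-threads = 64
//     keep-alive = "30s"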
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
O: Debug + StructOpt + Send + Sync + 'static,
C: DeserializeOwned + Send + Sync + 'static,
{
Self::postprocess_extension(extract, |_: &mut _| ())
}
/// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking.
///
/// This allows tweaking the [threadpool builder][runtime::Builder] after it has been pre-configured
/// by the configuration file. This might be desirable, for example, if the application also
/// wants to install an [`after_start`][runtime::Builder::after_start] callback or set the stack
/// size, options that either can't be configured by the user or don't make sense to expose.
pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
P: FnOnce(&mut runtime::Builder) + Send + 'static,
O: Debug + StructOpt + Send + Sync + 'static,
C: DeserializeOwned + Send + Sync + 'static,
{
let mut post = Some(post);
|mut builder: Builder<O, C>| {
assert!(
builder.singleton::<Runtime>(),
"Tokio Runtime already inserted"
);
trace!("Inserting configurable tokio runtime");
builder
.on_config({
let extract = extract.clone();
let mut first = None;
move |_: &O, cfg: &Arc<C>| {
let cfg = extract(cfg);
if first.is_none() {
first = Some(cfg);
} else if first.as_ref() != Some(&cfg) {
warn!("Tokio threadpool configuration can't be changed at runtime");
}
}
})
.run_around(|spirit, inner| {
Runtime::ThreadPool({
let spirit = Arc::clone(spirit);
Box::new(move |builder| {
let cfg = extract(&spirit.config());
if let Some(threads) = cfg.async_threads {
builder.core_threads(threads);
}
if let Some(threads) = cfg.blocking_threads {
builder.blocking_threads(threads);
}
if let Some(alive) = cfg.keep_alive {
builder.keep_alive(Some(alive));
}
(post.take().unwrap())(builder)
})
})
.execute(spirit, inner)
})
}
}
}
| {
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?; | identifier_body |
runtime.rs | //! An extension to start the tokio runtime at the appropriate time.
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use failure::Error;
use futures::future::{self, Future};
use log::{trace, warn};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use spirit::bodies::InnerBody;
use spirit::extension::{Extensible, Extension};
use spirit::{Builder, Spirit};
use structdoc::StructDoc;
use structopt::StructOpt;
use tokio::runtime;
/// A body run on tokio runtime.
///
/// When specifying custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this
/// is the future to be run inside the runtime.
pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>;
/// An extension to initialize a tokio runtime as part of spirit.
///
/// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like
/// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take().unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added to the enum at any time. Such a change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// //  a smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum Runtime {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
C: DeserializeOwned + Send + Sync + 'static,
O: StructOpt + Send + Sync + 'static,
{
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
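// A hedged sketch of the `Custom` variant, mirroring the `ThreadPool` branch above
// (builder tweaks omitted; error conversion follows the same pattern):
//
//     Runtime::Custom(Box::new(|fut| {
//         let mut runtime = runtime::Builder::new().build()?;
//         runtime.block_on(fut)?;
//         runtime.block_on_all(future::lazy(|| Ok(())))
//     }))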
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
E::Config: DeserializeOwned + Send + Sync + 'static,
E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than the number of CPUs, but |
/// Maximum number of blocking worker threads.
///
/// These do tasks that take a long time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
O: Debug + StructOpt + Send + Sync + 'static,
C: DeserializeOwned + Send + Sync + 'static,
{
Self::postprocess_extension(extract, |_: &mut _| ())
}
/// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking.
///
/// This allows tweaking the [threadpool builder][runtime::Builder] after it has been pre-configured
/// by the configuration file. This might be desirable, for example, if the application also
/// wants to install an [`after_start`][runtime::Builder::after_start] callback or set the stack
/// size, options that either can't be configured by the user or don't make sense to expose.
pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
P: FnOnce(&mut runtime::Builder) + Send + 'static,
O: Debug + StructOpt + Send + Sync + 'static,
C: DeserializeOwned + Send + Sync + 'static,
{
let mut post = Some(post);
|mut builder: Builder<O, C>| {
assert!(
builder.singleton::<Runtime>(),
"Tokio Runtime already inserted"
);
trace!("Inserting configurable tokio runtime");
builder
.on_config({
let extract = extract.clone();
let mut first = None;
move |_: &O, cfg: &Arc<C>| {
let cfg = extract(cfg);
if first.is_none() {
first = Some(cfg);
} else if first.as_ref() != Some(&cfg) {
warn!("Tokio threadpool configuration can't be changed at runtime");
}
}
})
.run_around(|spirit, inner| {
Runtime::ThreadPool({
let spirit = Arc::clone(spirit);
Box::new(move |builder| {
let cfg = extract(&spirit.config());
if let Some(threads) = cfg.async_threads {
builder.core_threads(threads);
}
if let Some(threads) = cfg.blocking_threads {
builder.blocking_threads(threads);
}
if let Some(alive) = cfg.keep_alive {
builder.keep_alive(Some(alive));
}
(post.take().unwrap())(builder)
})
})
.execute(spirit, inner)
})
}
}
} | /// it may make sense to set it lower.
///
/// If not set, the application will start with the number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>, | random_line_split |
init.rs |
use crate::{Error, Result, Logger, LogLevel, netlink, sys};
use crate::cmdline::CmdLine;
use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown};
use std::path::Path;
use std::{fs, process, io, env};
use crate::service::{Service, ServiceLaunch};
use std::collections::BTreeMap;
use std::io::Read;
use std::net::Ipv4Addr;
use std::str::FromStr;
use crate::audio::AudioSupport;
use crate::netlink::NetlinkSocket;
const BASHRC: &str = r#"
export PS1="airwolf > "
umask 022
shopt -s checkwinsize
alias ls='ls --color=auto'
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
"#;
pub struct InitServer {
hostname: String,
homedir: String,
cmdline: CmdLine,
rootfs: RootFS,
services: BTreeMap<u32, Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> |
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// family field: 0x0100 (FamilyLocal in the Xauthority format)
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len()
v.extend_from_slice(&[0x00, 0x10]);
v.extend_from_slice(&randbuf);
fs::write(&xauth_path, v)?;
_chown(&xauth_path, 1000, 1000)?;
Ok(())
}
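// The record assembled above follows the ~/.Xauthority entry layout: a u16 family,
// then big-endian u16-length-prefixed fields for the host address ("airwolf"), the
// display number ("0"), the auth protocol name ("MIT-MAGIC-COOKIE-1"), and finally
// the 16 random cookie bytes.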
pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> {
fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?;
let root = self.cmdline.has_var("phinit.rootshell");
let realm = self.cmdline.lookup("phinit.realm");
let home = if root { "/".to_string() } else { self.homedir().to_string() };
let shell = ServiceLaunch::new_shell(root, &home, realm)
.arg("--rcfile").arg("/run/bashrc")
.launch_with_preexec(move || {
// set_controlling_tty(0, true)?;
env::set_current_dir(&home)?;
println!("{}", splash);
Ok(())
})?;
self.services.insert(shell.pid(), shell);
Ok(())
}
fn wait_for_next_child(&mut self) -> Result<()> {
if let Some(child) = self.wait_for_child() {
info!("Service exited: {}", child.name());
if child.name() == "shell" {
reboot(libc::RB_AUTOBOOT)
.map_err(Error::RebootFailed)?;
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
loop {
self.wait_for_next_child()?;
}
}
fn handle_waitpid_err(err: io::Error) -> ! {
if let Some(errno) = err.raw_os_error() {
if errno == libc::ECHILD {
if let Err(err) = reboot(libc::RB_AUTOBOOT) {
warn!("reboot() failed: {:?}", err);
process::exit(-1);
}
}
}
warn!("error on waitpid: {:?}", err);
process::exit(-1);
}
fn wait_for_child(&mut self) -> Option<Service> {
match waitpid(-1, 0) {
Ok((pid,_status)) => self.services.remove(&(pid as u32)),
Err(err) => Self::handle_waitpid_err(err)
}
}
}
struct RootFS {
root: String,
fstype: String,
rootflags: Option<String>,
readonly: bool,
}
impl RootFS {
fn load(cmdline: &CmdLine) -> Result<Self> {
let root = cmdline.lookup("phinit.root")
.ok_or(Error::NoRootVar)?;
let fstype = cmdline.lookup("phinit.rootfstype")
.ok_or(Error::NoRootFsVar)?;
let rootflags = cmdline.lookup("phinit.rootflags");
let readonly = !cmdline.has_var("phinit.root_rw");
Ok(RootFS {
root, fstype, rootflags, readonly
})
}
fn read_only(&self) -> bool {
self.readonly
}
fn mount(&self, target: &str) -> Result<()> {
let options = self.rootflags.as_ref().map(|s| s.as_str());
let mut flags = libc::MS_NOATIME;
if self.readonly {
flags |= libc::MS_RDONLY;
}
mount(&self.root, target, &self.fstype, flags, options)
.map_err(|e| Error::RootFsMount(self.root.clone(), e))
}
}
| {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
} | identifier_body |
init.rs |
use crate::{Error, Result, Logger, LogLevel, netlink, sys};
use crate::cmdline::CmdLine;
use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown};
use std::path::Path;
use std::{fs, process, io, env};
use crate::service::{Service, ServiceLaunch};
use std::collections::BTreeMap;
use std::io::Read;
use std::net::Ipv4Addr;
use std::str::FromStr;
use crate::audio::AudioSupport;
use crate::netlink::NetlinkSocket;
const BASHRC: &str = r#"
export PS1="airwolf > "
umask 022
shopt -s checkwinsize
alias ls='ls --color=auto'
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
"#;
pub struct InitServer {
hostname: String,
homedir: String,
cmdline: CmdLine,
rootfs: RootFS,
services: BTreeMap<u32, Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else |
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
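// Example: phinit.ip=192.168.12.7 assigns 192.168.12.7/24 to eth0 and adds a
// default route via 192.168.12.1 (the address with its last octet forced to 1).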
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// family field: 0x0100 (FamilyLocal in the Xauthority format)
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len()
v.extend_from_slice(&[0x00, 0x10]);
v.extend_from_slice(&randbuf);
fs::write(&xauth_path, v)?;
_chown(&xauth_path, 1000, 1000)?;
Ok(())
}
pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> {
fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?;
let root = self.cmdline.has_var("phinit.rootshell");
let realm = self.cmdline.lookup("phinit.realm");
let home = if root { "/".to_string() } else { self.homedir().to_string() };
let shell = ServiceLaunch::new_shell(root, &home, realm)
.arg("--rcfile").arg("/run/bashrc")
.launch_with_preexec(move || {
// set_controlling_tty(0, true)?;
env::set_current_dir(&home)?;
println!("{}", splash);
Ok(())
})?;
self.services.insert(shell.pid(), shell);
Ok(())
}
fn wait_for_next_child(&mut self) -> Result<()> {
if let Some(child) = self.wait_for_child() {
info!("Service exited: {}", child.name());
if child.name() == "shell" {
reboot(libc::RB_AUTOBOOT)
.map_err(Error::RebootFailed)?;
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
loop {
self.wait_for_next_child()?;
}
}
fn handle_waitpid_err(err: io::Error) -> ! {
if let Some(errno) = err.raw_os_error() {
if errno == libc::ECHILD {
if let Err(err) = reboot(libc::RB_AUTOBOOT) {
warn!("reboot() failed: {:?}", err);
process::exit(-1);
}
}
}
warn!("error on waitpid: {:?}", err);
process::exit(-1);
}
fn wait_for_child(&mut self) -> Option<Service> {
match waitpid(-1, 0) {
Ok((pid,_status)) => self.services.remove(&(pid as u32)),
Err(err) => Self::handle_waitpid_err(err)
}
}
}
struct RootFS {
root: String,
fstype: String,
rootflags: Option<String>,
readonly: bool,
}
impl RootFS {
fn load(cmdline: &CmdLine) -> Result<Self> {
let root = cmdline.lookup("phinit.root")
.ok_or(Error::NoRootVar)?;
let fstype = cmdline.lookup("phinit.rootfstype")
.ok_or(Error::NoRootFsVar)?;
let rootflags = cmdline.lookup("phinit.rootflags");
let readonly = !cmdline.has_var("phinit.root_rw");
Ok(RootFS {
root, fstype, rootflags, readonly
})
}
fn read_only(&self) -> bool {
self.readonly
}
fn mount(&self, target: &str) -> Result<()> {
let options = self.rootflags.as_ref().map(|s| s.as_str());
let mut flags = libc::MS_NOATIME;
if self.readonly {
flags |= libc::MS_RDONLY;
}
mount(&self.root, target, &self.fstype, flags, options)
.map_err(|e| Error::RootFsMount(self.root.clone(), e))
}
}
| {
Logger::set_log_level(LogLevel::Info);
} | conditional_block |
init.rs | use crate::{Error, Result, Logger, LogLevel, netlink, sys};
use crate::cmdline::CmdLine;
use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown};
use std::path::Path;
use std::{fs, process, io, env};
use crate::service::{Service, ServiceLaunch};
use std::collections::BTreeMap;
use std::io::Read;
use std::net::Ipv4Addr;
use std::str::FromStr;
use crate::audio::AudioSupport;
use crate::netlink::NetlinkSocket;
const BASHRC: &str = r#"
export PS1="airwolf > "
umask 022
shopt -s checkwinsize
alias ls='ls --color=auto'
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
"#;
pub struct InitServer {
hostname: String,
homedir: String,
cmdline: CmdLine,
rootfs: RootFS,
services: BTreeMap<u32, Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(()); |
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// family field: 0x0100 (FamilyLocal in the Xauthority format)
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len()
v.extend_from_slice(&[0x00, 0x10]);
v.extend_from_slice(&randbuf);
fs::write(&xauth_path, v)?;
_chown(&xauth_path, 1000, 1000)?;
Ok(())
}
pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> {
fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?;
let root = self.cmdline.has_var("phinit.rootshell");
let realm = self.cmdline.lookup("phinit.realm");
let home = if root { "/".to_string() } else { self.homedir().to_string() };
let shell = ServiceLaunch::new_shell(root, &home, realm)
.arg("--rcfile").arg("/run/bashrc")
.launch_with_preexec(move || {
// set_controlling_tty(0, true)?;
env::set_current_dir(&home)?;
println!("{}", splash);
Ok(())
})?;
self.services.insert(shell.pid(), shell);
Ok(())
}
fn wait_for_next_child(&mut self) -> Result<()> {
if let Some(child) = self.wait_for_child() {
info!("Service exited: {}", child.name());
if child.name() == "shell" {
reboot(libc::RB_AUTOBOOT)
.map_err(Error::RebootFailed)?;
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
loop {
self.wait_for_next_child()?;
}
}
fn handle_waitpid_err(err: io::Error) -> ! {
if let Some(errno) = err.raw_os_error() {
if errno == libc::ECHILD {
if let Err(err) = reboot(libc::RB_AUTOBOOT) {
warn!("reboot() failed: {:?}", err);
process::exit(-1);
}
}
}
warn!("error on waitpid: {:?}", err);
process::exit(-1);
}
fn wait_for_child(&mut self) -> Option<Service> {
match waitpid(-1, 0) {
Ok((pid,_status)) => self.services.remove(&(pid as u32)),
Err(err) => Self::handle_waitpid_err(err)
}
}
}
struct RootFS {
root: String,
fstype: String,
rootflags: Option<String>,
readonly: bool,
}
impl RootFS {
fn load(cmdline: &CmdLine) -> Result<Self> {
let root = cmdline.lookup("phinit.root")
.ok_or(Error::NoRootVar)?;
let fstype = cmdline.lookup("phinit.rootfstype")
.ok_or(Error::NoRootFsVar)?;
let rootflags = cmdline.lookup("phinit.rootflags");
let readonly = !cmdline.has_var("phinit.root_rw");
Ok(RootFS {
root, fstype, rootflags, readonly
})
}
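// An illustrative kernel command line consumed by `load` (device and flag values
// are assumptions):
//
//     phinit.root=/dev/vda phinit.rootfstype=ext4 phinit.rootflags=discard phinit.root_rw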
fn read_only(&self) -> bool {
self.readonly
}
fn mount(&self, target: &str) -> Result<()> {
let options = self.rootflags.as_ref().map(|s| s.as_str());
let mut flags = libc::MS_NOATIME;
if self.readonly {
flags |= libc::MS_RDONLY;
}
mount(&self.root, target, &self.fstype, flags, options)
.map_err(|e| Error::RootFsMount(self.root.clone(), e))
}
} | } | random_line_split |
init.rs |
use crate::{Error, Result, Logger, LogLevel, netlink, sys};
use crate::cmdline::CmdLine;
use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown};
use std::path::Path;
use std::{fs, process, io, env};
use crate::service::{Service, ServiceLaunch};
use std::collections::BTreeMap;
use std::io::Read;
use std::net::Ipv4Addr;
use std::str::FromStr;
use crate::audio::AudioSupport;
use crate::netlink::NetlinkSocket;
const BASHRC: &str = r#"
export PS1="airwolf > "
umask 022
shopt -s checkwinsize
alias ls='ls --color=auto'
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
"#;
pub struct InitServer {
hostname: String,
homedir: String,
cmdline: CmdLine,
rootfs: RootFS,
services: BTreeMap<u32, Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn | (&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
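// A sketch of the check hinted at above (the glob path is an assumption): read each
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag and return true only when
// one of the tags equals "home", instead of unconditionally returning true.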
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// family field: 0x0100 (FamilyLocal in the Xauthority format)
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len()
v.extend_from_slice(&[0x00, 0x10]);
v.extend_from_slice(&randbuf);
fs::write(&xauth_path, v)?;
_chown(&xauth_path, 1000, 1000)?;
Ok(())
}
pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> {
fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?;
let root = self.cmdline.has_var("phinit.rootshell");
let realm = self.cmdline.lookup("phinit.realm");
let home = if root { "/".to_string() } else { self.homedir().to_string() };
let shell = ServiceLaunch::new_shell(root, &home, realm)
.arg("--rcfile").arg("/run/bashrc")
.launch_with_preexec(move || {
// set_controlling_tty(0, true)?;
env::set_current_dir(&home)?;
println!("{}", splash);
Ok(())
})?;
self.services.insert(shell.pid(), shell);
Ok(())
}
fn wait_for_next_child(&mut self) -> Result<()> {
if let Some(child) = self.wait_for_child() {
info!("Service exited: {}", child.name());
if child.name() == "shell" {
reboot(libc::RB_AUTOBOOT)
.map_err(Error::RebootFailed)?;
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
loop {
self.wait_for_next_child()?;
}
}
fn handle_waitpid_err(err: io::Error) -> ! {
if let Some(errno) = err.raw_os_error() {
if errno == libc::ECHILD {
if let Err(err) = reboot(libc::RB_AUTOBOOT) {
warn!("reboot() failed: {:?}", err);
process::exit(-1);
}
}
}
warn!("error on waitpid: {:?}", err);
process::exit(-1);
}
fn wait_for_child(&mut self) -> Option<Service> {
match waitpid(-1, 0) {
Ok((pid,_status)) => self.services.remove(&(pid as u32)),
Err(err) => Self::handle_waitpid_err(err)
}
}
}
struct RootFS {
root: String,
fstype: String,
rootflags: Option<String>,
readonly: bool,
}
impl RootFS {
fn load(cmdline: &CmdLine) -> Result<Self> {
let root = cmdline.lookup("phinit.root")
.ok_or(Error::NoRootVar)?;
let fstype = cmdline.lookup("phinit.rootfstype")
.ok_or(Error::NoRootFsVar)?;
let rootflags = cmdline.lookup("phinit.rootflags");
let readonly = !cmdline.has_var("phinit.root_rw");
Ok(RootFS {
root, fstype, rootflags, readonly
})
}
fn read_only(&self) -> bool {
self.readonly
}
fn mount(&self, target: &str) -> Result<()> {
let options = self.rootflags.as_ref().map(|s| s.as_str());
let mut flags = libc::MS_NOATIME;
if self.readonly {
flags |= libc::MS_RDONLY;
}
mount(&self.root, target, &self.fstype, flags, options)
.map_err(|e| Error::RootFsMount(self.root.clone(), e))
}
}
| setup_filesystem | identifier_name |
olm_parser.rs | use crate::graph::graph::{Rules, Edges, Graph, Vertices};
use crate::io::{
limit_iter::Limit,
sub_matrix::SubMatrix,
tri_wave::TriWave,
utils::{DiagonalReflection, Reflection, Rotation},
};
use crate::utils::{index_to_coords, is_inside, coords_to_index};
use crate::wfc::collapse;
use bimap::BiMap;
use hashbrown::HashMap;
use image::{imageops, Rgb, RgbImage, Pixel};
use itertools::Itertools;
use nalgebra::DMatrix;
use std::ops::{IndexMut, Index, AddAssign};
use std::convert::TryFrom;
use indexmap::IndexMap;
use std::ops::Not;
use crate::MSu16xNU;
type Chunk = DMatrix<usize>;
type PixelKeys = BiMap<usize, Rgb<u8>>;
// TODO: handle unwrap of image::open properly
pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) {
let img = image::open(filename).unwrap().to_rgb8();
let pixel_aliases = alias_pixels(&img);
let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false);
let overlap_rules = overlaps(&chunk_frequencies, chunk_size);
if chunk_frequencies.len() > MSu16xNU::len() {
println!("Chunks LEN: {}", chunk_frequencies.len());
panic!("labels multiset not large enough to store all unique chunks")
}
let all_labels = chunk_frequencies.values().collect();
let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3));
let mut pruned_rules: Rules = HashMap::new();
(0..all_labels.count_non_zero())
.for_each(|label| {
// pruned graph vertices returned from collapse
let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize);
real_vertex_indexes(chunk_size)
.iter()
.enumerate()
.for_each(|(direction, index)| {
let set = pruned_graph.vertices.index(*index);
if !set.is_empty() {
pruned_rules.insert((direction as u16, label as usize), *set);
}
});
});
(pruned_rules, pixel_aliases, all_labels, chunk_frequencies)
}
// todo: work out if step will be needed, currently useless
const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] {
let dim = (3 * chunk_size) - (chunk_size - 1);
let step = chunk_size - 1;
[
0, // NW
step + 1, // N
(step + 1) * 2, // NE
dim * chunk_size, // W
// dim * chunk_size + step + 1 // Center (unused)
dim * chunk_size + (step + 1) * 2, // E
dim * chunk_size * 2, // SW
dim * chunk_size * 2 + step + 1, // S
dim * chunk_size * 2 + (step + 1) * 2, // SE
]
}
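// Worked example (illustrative): with chunk_size = 2, dim = 3 * 2 - 1 = 5 and
// step = 1, so the indexes come out as [0, 2, 4, 10, 14, 20, 22, 24] -- the
// corners and edge midpoints of the 5x5 pixel-vertex grid, skipping the centre.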
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
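// Sanity check (illustrative): sliding an N x N window over a W x H image
// yields (W - N + 1) * (H - N + 1) crops, e.g. 9 sub-images when a 4x4 image
// is scanned with chunk_size = 2.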
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the unique chunks of the input image and the frequency of each chunk
fn | (
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
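// Note (illustrative): the enumeration gives every overlap direction a mirror
// partner at `len - 1 - direction`; for chunk_size = 2, direction 0 (the 1x1
// corner at (0, 0)) pairs with direction 7 (the 1x1 corner at (1, 1)). The
// `overlaps` function below relies on this when computing `reverse_index`.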
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
}
})
});
rules
})
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offset coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
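    // e.g. for chunk_size = 2 the range is -1..2, so the product above yields
    // the eight neighbour offsets (-1, -1) .. (1, 1) around each vertex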
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
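// What `propagate_overlaps` does for one label (sketch): the centre vertex of
// the (2N + 1) x (2N + 1) pixel-vertex grid is forced to `label`, and a single
// collapse pass prunes every other vertex down to the labels that can legally
// overlap it; `parse` then reads the eight compass vertices back out as rules.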
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2);
assert_eq!(chunk_map.len(), 8);
expected_map
.iter()
.for_each(|(chunk, frequency)| {
assert_eq!(chunk_map.get(chunk).unwrap(), frequency);
});
}
#[test]
fn test_subchunk_positions() {
let sub_chunks = vec![
((0, 0), (1, 1), 0),
((0, 0), (2, 1), 1),
((1, 0), (1, 1), 2),
((0, 0), (1, 2), 3),
// ((0, 0), (2, 2), 4) --> Implicit full overlap removed
((1, 0), (1, 2), 4),
((0, 1), (1, 1), 5),
((0, 1), (2, 1), 6),
((1, 1), (1, 1), 7)
];
assert_eq!(sub_chunk_positions(2), sub_chunks);
}
#[test]
fn test_overlaps() {
let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1);
let mut overlaps_n2: Rules = HashMap::new();
overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect());
let result_n2 = overlaps(&chunks_n2, 2);
assert_eq!(result_n2, overlaps_n2);
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1);
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1);
let mut overlaps_n3: Rules = HashMap::new();
overlaps_n3.insert((0, 0), [0, 1].iter().collect());
overlaps_n3.insert((23, 1), [1, 0].iter().collect());
let result_n3 = overlaps(&chunks_n3, 3);
assert_eq!(result_n3, overlaps_n3);
let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n4.insert(DMatrix::from_row_slice(
4, 4, &vec![0, 0, 2, 3,
0, 1, 4, 5,
6, 7, 0, 0,
8, 9, 0, 1]), 1);
// test overlapping with self only
let mut overlaps_n4: Rules = HashMap::new();
overlaps_n4.insert((8, 0), [1, 0].iter().collect());
overlaps_n4.insert((39, 0), [1, 0].iter().collect());
let results_n4 = overlaps(&chunks_n4, 4);
assert_eq!(results_n4, overlaps_n4);
}
#[test]
fn test_create_raw_graph() {
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1);
let edges_n3: Edges = hash_map(&[
(0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]),
(1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]),
(2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]),
(3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]),
(4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]),
]);
let all_labels: MSu16xNU = chunks_n3.values().collect();
let raw_graph = create_raw_graph(&all_labels, 3, (2, 2));
assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap());
assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap());
assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap());
assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap());
assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap());
}
} | chunk_image | identifier_name |
olm_parser.rs | use crate::graph::graph::{Rules, Edges, Graph, Vertices};
use crate::io::{
limit_iter::Limit,
sub_matrix::SubMatrix,
tri_wave::TriWave,
utils::{DiagonalReflection, Reflection, Rotation},
};
use crate::utils::{index_to_coords, is_inside, coords_to_index};
use crate::wfc::collapse;
use bimap::BiMap;
use hashbrown::HashMap;
use image::{imageops, Rgb, RgbImage, Pixel};
use itertools::Itertools;
use nalgebra::DMatrix;
use std::ops::{IndexMut, Index, AddAssign};
use std::convert::TryFrom;
use indexmap::IndexMap;
use std::ops::Not;
use crate::MSu16xNU;
type Chunk = DMatrix<usize>;
type PixelKeys = BiMap<usize, Rgb<u8>>;
// TODO: handle unwrap of image::open properly
pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) {
let img = image::open(filename).unwrap().to_rgb8();
let pixel_aliases = alias_pixels(&img);
let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false);
let overlap_rules = overlaps(&chunk_frequencies, chunk_size);
if chunk_frequencies.len() > MSu16xNU::len() {
println!("Chunks LEN: {}", chunk_frequencies.len());
panic!("labels multiset not large enough to store all unique chunks")
}
let all_labels = chunk_frequencies.values().collect();
let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3));
let mut pruned_rules: Rules = HashMap::new();
(0..all_labels.count_non_zero())
.for_each(|label| {
// pruned graph vertices returned from collapse
let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize);
real_vertex_indexes(chunk_size)
.iter()
.enumerate()
.for_each(|(direction, index)| {
let set = pruned_graph.vertices.index(*index);
if !set.is_empty() {
pruned_rules.insert((direction as u16, label as usize), *set);
}
});
});
(pruned_rules, pixel_aliases, all_labels, chunk_frequencies)
}
// todo: work out if step will be needed, currently useless
const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] {
let dim = (3 * chunk_size) - (chunk_size - 1);
let step = chunk_size - 1;
[
0, // NW
step + 1, // N
(step + 1) * 2, // NE
dim * chunk_size, // W
// dim * chunk_size + step + 1 // Center (unused)
dim * chunk_size + (step + 1) * 2, // E
dim * chunk_size * 2, // SW
dim * chunk_size * 2 + step + 1, // S
dim * chunk_size * 2 + (step + 1) * 2, // SE
]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the unique chunks of the input image and the frequency of each chunk
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
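// A minimal in-memory sketch of the chunking pipeline (the module below is
// illustrative and not part of the original file): a 2x2 two-colour image is
// aliased and chunked with chunk_size = 2, which leaves exactly one window
// position and therefore one chunk with frequency 1.
#[cfg(test)]
mod chunk_image_sketch {
    use super::*;
    use image::ImageBuffer;

    #[test]
    fn single_window() {
        // white, black / black, white in row-major RGB byte order
        let pixels = vec![255, 255, 255, 0, 0, 0, 0, 0, 0, 255, 255, 255];
        let img: RgbImage = ImageBuffer::from_vec(2, 2, pixels).unwrap();
        let aliases = alias_pixels(&img);
        // all symmetries disabled: one 2x2 window over a 2x2 image
        let chunks = chunk_image(img, 2, &aliases, false, false, false, false);
        assert_eq!(chunks.len(), 1);
        let expected = DMatrix::from_row_slice(2, 2, &[0, 1, 1, 0]);
        assert_eq!(chunks.get(&expected), Some(&1));
    }
}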
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk |
})
});
rules
})
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offset coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2);
assert_eq!(chunk_map.len(), 8);
expected_map
.iter()
.for_each(|(chunk, frequency)| {
assert_eq!(chunk_map.get(chunk).unwrap(), frequency);
});
}
#[test]
fn test_subchunk_positions() {
let sub_chunks = vec![
((0, 0), (1, 1), 0),
((0, 0), (2, 1), 1),
((1, 0), (1, 1), 2),
((0, 0), (1, 2), 3),
// ((0, 0), (2, 2), 4) --> Implicit full overlap removed
((1, 0), (1, 2), 4),
((0, 1), (1, 1), 5),
((0, 1), (2, 1), 6),
((1, 1), (1, 1), 7)
];
assert_eq!(sub_chunk_positions(2), sub_chunks);
}
#[test]
fn test_overlaps() {
let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1);
let mut overlaps_n2: Rules = HashMap::new();
overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect());
let result_n2 = overlaps(&chunks_n2, 2);
assert_eq!(result_n2, overlaps_n2);
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1);
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1);
let mut overlaps_n3: Rules = HashMap::new();
overlaps_n3.insert((0, 0), [0, 1].iter().collect());
overlaps_n3.insert((23, 1), [1, 0].iter().collect());
let result_n3 = overlaps(&chunks_n3, 3);
assert_eq!(result_n3, overlaps_n3);
let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n4.insert(DMatrix::from_row_slice(
4, 4, &vec![0, 0, 2, 3,
0, 1, 4, 5,
6, 7, 0, 0,
8, 9, 0, 1]), 1);
// test overlapping with self only
let mut overlaps_n4: Rules = HashMap::new();
overlaps_n4.insert((8, 0), [1, 0].iter().collect());
overlaps_n4.insert((39, 0), [1, 0].iter().collect());
let results_n4 = overlaps(&chunks_n4, 4);
assert_eq!(results_n4, overlaps_n4);
}
#[test]
fn test_create_raw_graph() {
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1);
let edges_n3: Edges = hash_map(&[
(0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]),
(1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]),
(2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]),
(3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]),
(4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]),
]);
let all_labels: MSu16xNU = chunks_n3.values().collect();
let raw_graph = create_raw_graph(&all_labels, 3, (2, 2));
assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap());
assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap());
assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap());
assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap());
assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap());
}
} | {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
} | conditional_block |
olm_parser.rs | use crate::graph::graph::{Rules, Edges, Graph, Vertices};
use crate::io::{
limit_iter::Limit,
sub_matrix::SubMatrix,
tri_wave::TriWave,
utils::{DiagonalReflection, Reflection, Rotation},
};
use crate::utils::{index_to_coords, is_inside, coords_to_index};
use crate::wfc::collapse;
use bimap::BiMap;
use hashbrown::HashMap;
use image::{imageops, Rgb, RgbImage, Pixel};
use itertools::Itertools;
use nalgebra::DMatrix;
use std::ops::{IndexMut, Index, AddAssign};
use std::convert::TryFrom;
use indexmap::IndexMap;
use std::ops::Not;
use crate::MSu16xNU;
type Chunk = DMatrix<usize>;
type PixelKeys = BiMap<usize, Rgb<u8>>;
// TODO: handle unwrap of image::open properly
pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) {
let img = image::open(filename).unwrap().to_rgb8();
let pixel_aliases = alias_pixels(&img);
let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false);
let overlap_rules = overlaps(&chunk_frequencies, chunk_size);
if chunk_frequencies.len() > MSu16xNU::len() {
println!("Chunks LEN: {}", chunk_frequencies.len());
panic!("labels multiset not large enough to store all unique chunks")
}
let all_labels = chunk_frequencies.values().collect();
let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3));
let mut pruned_rules: Rules = HashMap::new();
(0..all_labels.count_non_zero())
.for_each(|label| {
// pruned graph vertices returned from collapse
let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize);
real_vertex_indexes(chunk_size)
.iter()
.enumerate()
.for_each(|(direction, index)| {
let set = pruned_graph.vertices.index(*index);
if !set.is_empty() {
pruned_rules.insert((direction as u16, label as usize), *set);
}
});
});
(pruned_rules, pixel_aliases, all_labels, chunk_frequencies)
}
// todo: work out if step will be needed, currently useless
const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] {
let dim = (3 * chunk_size) - (chunk_size - 1);
let step = chunk_size - 1;
[
0, // NW
step + 1, // N
(step + 1) * 2, // NE
dim * chunk_size, // W
// dim * chunk_size + step + 1 // Center (unused)
dim * chunk_size + (step + 1) * 2, // E
dim * chunk_size * 2, // SW
dim * chunk_size * 2 + step + 1, // S
dim * chunk_size * 2 + (step + 1) * 2, // SE
]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the unique chunks of the input image and the frequency of each chunk
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set)) | })
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offset coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
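// Dimension sketch (illustrative): for chunk_size = 3 and a (2, 2) chunk grid,
// each side is 2 * 3 - (3 - 1) = 4 pixel vertices, so the graph holds 16
// vertices and an interior vertex sees up to 5 * 5 - 1 = 24 neighbour
// directions -- the numbering exercised by `test_create_raw_graph` below.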
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2);
assert_eq!(chunk_map.len(), 8);
expected_map
.iter()
.for_each(|(chunk, frequency)| {
assert_eq!(chunk_map.get(chunk).unwrap(), frequency);
});
}
#[test]
fn test_subchunk_positions() {
let sub_chunks = vec![
((0, 0), (1, 1), 0),
((0, 0), (2, 1), 1),
((1, 0), (1, 1), 2),
((0, 0), (1, 2), 3),
// ((0, 0), (2, 2), 4) --> Implicit full overlap removed
((1, 0), (1, 2), 4),
((0, 1), (1, 1), 5),
((0, 1), (2, 1), 6),
((1, 1), (1, 1), 7)
];
assert_eq!(sub_chunk_positions(2), sub_chunks);
}
#[test]
fn test_overlaps() {
let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1);
let mut overlaps_n2: Rules = HashMap::new();
overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect());
let result_n2 = overlaps(&chunks_n2, 2);
assert_eq!(result_n2, overlaps_n2);
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1);
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1);
let mut overlaps_n3: Rules = HashMap::new();
overlaps_n3.insert((0, 0), [0, 1].iter().collect());
overlaps_n3.insert((23, 1), [1, 0].iter().collect());
let result_n3 = overlaps(&chunks_n3, 3);
assert_eq!(result_n3, overlaps_n3);
let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n4.insert(DMatrix::from_row_slice(
4, 4, &vec![0, 0, 2, 3,
0, 1, 4, 5,
6, 7, 0, 0,
8, 9, 0, 1]), 1);
// test overlapping with self only
let mut overlaps_n4: Rules = HashMap::new();
overlaps_n4.insert((8, 0), [1, 0].iter().collect());
overlaps_n4.insert((39, 0), [1, 0].iter().collect());
let results_n4 = overlaps(&chunks_n4, 4);
assert_eq!(results_n4, overlaps_n4);
}
#[test]
fn test_create_raw_graph() {
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1);
let edges_n3: Edges = hash_map(&[
(0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]),
(1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]),
(2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]),
(3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]),
(4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]),
]);
let all_labels: MSu16xNU = chunks_n3.values().collect();
let raw_graph = create_raw_graph(&all_labels, 3, (2, 2));
assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap());
assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap());
assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap());
assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap());
assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap());
}
} | .or_insert(set);
}
})
});
rules | random_line_split |
olm_parser.rs | use crate::graph::graph::{Rules, Edges, Graph, Vertices};
use crate::io::{
limit_iter::Limit,
sub_matrix::SubMatrix,
tri_wave::TriWave,
utils::{DiagonalReflection, Reflection, Rotation},
};
use crate::utils::{index_to_coords, is_inside, coords_to_index};
use crate::wfc::collapse;
use bimap::BiMap;
use hashbrown::HashMap;
use image::{imageops, Rgb, RgbImage, Pixel};
use itertools::Itertools;
use nalgebra::DMatrix;
use std::ops::{IndexMut, Index, AddAssign};
use std::convert::TryFrom;
use indexmap::IndexMap;
use std::ops::Not;
use crate::MSu16xNU;
type Chunk = DMatrix<usize>;
type PixelKeys = BiMap<usize, Rgb<u8>>;
// TODO: handle unwrap of image::open properly
pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) {
let img = image::open(filename).unwrap().to_rgb8();
let pixel_aliases = alias_pixels(&img);
let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false);
let overlap_rules = overlaps(&chunk_frequencies, chunk_size);
if chunk_frequencies.len() > MSu16xNU::len() {
println!("Chunks LEN: {}", chunk_frequencies.len());
panic!("labels multiset not large enough to store all unique chunks")
}
let all_labels = chunk_frequencies.values().collect();
let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3));
let mut pruned_rules: Rules = HashMap::new();
(0..all_labels.count_non_zero())
.for_each(|label| {
// pruned graph vertices returned from collapse
let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize);
real_vertex_indexes(chunk_size)
.iter()
.enumerate()
.for_each(|(direction, index)| {
let set = pruned_graph.vertices.index(*index);
if !set.is_empty() {
pruned_rules.insert((direction as u16, label as usize), *set);
}
});
});
(pruned_rules, pixel_aliases, all_labels, chunk_frequencies)
}
// todo: work out if step will be needed, currently useless
const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] {
let dim = (3 * chunk_size) - (chunk_size - 1);
let step = chunk_size - 1;
[
0, // NW
step + 1, // N
(step + 1) * 2, // NE
dim * chunk_size, // W
// dim * chunk_size + step + 1 // Center (unused)
dim * chunk_size + (step + 1) * 2, // E
dim * chunk_size * 2, // SW
dim * chunk_size * 2 + step + 1, // S
dim * chunk_size * 2 + (step + 1) * 2, // SE
]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the unique chunks of the input image and the frequency of each chunk
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
}
})
});
rules
})
}
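// Shape of the result (illustrative): `Rules` maps a (direction, label) pair
// to the multiset of labels whose mirrored sub-chunk matches in that
// direction, so a lookup during collapse is a single hash probe.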
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offset coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() | .iter()
.for_each(|(chunk, frequency)| {
assert_eq!(chunk_map.get(chunk).unwrap(), frequency);
});
}
#[test]
fn test_subchunk_positions() {
let sub_chunks = vec![
((0, 0), (1, 1), 0),
((0, 0), (2, 1), 1),
((1, 0), (1, 1), 2),
((0, 0), (1, 2), 3),
// ((0, 0), (2, 2), 4) --> Implicit full overlap removed
((1, 0), (1, 2), 4),
((0, 1), (1, 1), 5),
((0, 1), (2, 1), 6),
((1, 1), (1, 1), 7)
];
assert_eq!(sub_chunk_positions(2), sub_chunks);
}
#[test]
fn test_overlaps() {
let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1);
chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1);
let mut overlaps_n2: Rules = HashMap::new();
overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect());
overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect());
overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect());
let result_n2 = overlaps(&chunks_n2, 2);
assert_eq!(result_n2, overlaps_n2);
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1);
chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1);
let mut overlaps_n3: Rules = HashMap::new();
overlaps_n3.insert((0, 0), [0, 1].iter().collect());
overlaps_n3.insert((23, 1), [1, 0].iter().collect());
let result_n3 = overlaps(&chunks_n3, 3);
assert_eq!(result_n3, overlaps_n3);
let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n4.insert(DMatrix::from_row_slice(
4, 4, &vec![0, 0, 2, 3,
0, 1, 4, 5,
6, 7, 0, 0,
8, 9, 0, 1]), 1);
// test overlapping with self only
let mut overlaps_n4: Rules = HashMap::new();
overlaps_n4.insert((8, 0), [1, 0].iter().collect());
overlaps_n4.insert((39, 0), [1, 0].iter().collect());
let results_n4 = overlaps(&chunks_n4, 4);
assert_eq!(results_n4, overlaps_n4);
}
#[test]
fn test_create_raw_graph() {
let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new();
chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1);
let edges_n3: Edges = hash_map(&[
(0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]),
(1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]),
(2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]),
(3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]),
(4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]),
]);
let all_labels: MSu16xNU = chunks_n3.values().collect();
let raw_graph = create_raw_graph(&all_labels, 3, (2, 2));
assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap());
assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap());
assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap());
assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap());
assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap());
}
} | {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2);
assert_eq!(chunk_map.len(), 8);
expected_map | identifier_body |
wasm.rs | use std::collections::HashMap;
use std::convert::TryFrom;
use crate::error::Error;
/// The allowable types for any real value in wasm (u8 and others are packed)
#[derive(Copy, Clone, PartialEq)]
pub enum PrimitiveType {
I32,
I64,
F32,
F64,
}
impl From<i32> for PrimitiveType {
fn from(_: i32) -> PrimitiveType {
PrimitiveType::I32
}
}
impl From<i64> for PrimitiveType {
fn from(_: i64) -> PrimitiveType {
PrimitiveType::I64
}
}
impl From<f32> for PrimitiveType {
fn from(_: f32) -> PrimitiveType {
PrimitiveType::F32
}
}
impl From<f64> for PrimitiveType {
fn from(_: f64) -> PrimitiveType {
PrimitiveType::F64
}
}
/// Storage type for all wasm values
#[derive(Copy, Clone)]
pub union InternalValue {
i32: i32,
i64: i64,
f32: f32,
f64: f64,
}
impl From<i32> for InternalValue {
fn from(x: i32) -> InternalValue {
InternalValue { i32: x }
}
}
impl From<i64> for InternalValue {
fn from(x: i64) -> InternalValue {
InternalValue { i64: x }
}
}
impl From<f32> for InternalValue {
fn from(x: f32) -> InternalValue {
InternalValue { f32: x }
}
}
impl From<f64> for InternalValue {
fn from(x: f64) -> InternalValue {
InternalValue { f64: x }
}
}
/// Representation of all wasm values
#[derive(Copy, Clone)]
pub struct Value {
t: PrimitiveType,
v: InternalValue,
}
impl Value {
pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self {
Self {
t: x.into(),
v: x.into(),
}
}
pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value {
Self {
t,
v: InternalValue { i64: v as i64 },
}
}
#[inline]
pub fn as_i32_unchecked(&self) -> i32 {
unsafe { self.v.i32 }
}
#[inline]
pub fn as_i64_unchecked(&self) -> i64 {
unsafe { self.v.i64 }
}
#[inline]
pub fn as_f32_unchecked(&self) -> f32 {
unsafe { self.v.f32 }
}
#[inline]
pub fn as_f64_unchecked(&self) -> f64 {
unsafe { self.v.f64 }
}
}
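// A minimal sketch (the module below is illustrative, not part of the original
// file) of how the tagged union round-trips values: `t` records which field of
// `v` is live, and the `as_*_unchecked` accessors reinterpret the union as-is.
#[cfg(test)]
mod value_sketch {
    use super::*;

    #[test]
    fn round_trip() {
        assert_eq!(Value::new(42_i32).as_i32_unchecked(), 42);
        assert_eq!(Value::new(7_i64).as_i64_unchecked(), 7);
        assert!((Value::new(2.5_f32).as_f32_unchecked() - 2.5).abs() < f32::EPSILON);
    }
}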
impl From<i32> for Value {
fn from(v: i32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<i64> for Value {
fn from(v: i64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f32> for Value {
fn from(v: f32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f64> for Value {
fn from(v: f64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl TryFrom<Value> for u32 {
type Error = Error;
fn try_from(x: Value) -> Result<u32, Error> {
match x.t {
PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }),
_ => Err(Error::Misc("Cannot extract as u32 from incorrect type")),
}
}
}
impl From<&PrimitiveType> for Value {
fn from(x: &PrimitiveType) -> Value {
match x {
PrimitiveType::I32 => Value::new(0_i32),
PrimitiveType::I64 => Value::new(0_i64),
PrimitiveType::F32 => Value::new(0_f32),
PrimitiveType::F64 => Value::new(0_f64),
}
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
let offset_to_fetch = stack_size - 1 - offset;
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
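// Sketch of the stack discipline (illustrative test, not part of the original
// file): `fetch_value(0)` peeks the most recent push without removing it,
// while `pop_value` consumes values in LIFO order.
#[cfg(test)]
mod stack_sketch {
    use super::*;

    #[test]
    fn push_peek_pop() {
        let mut s = Stack::new();
        s.push_value(Value::new(1_i32));
        s.push_value(Value::new(2_i32));
        assert_eq!(s.fetch_value(0).ok().map(|v| v.as_i32_unchecked()), Some(2));
        assert_eq!(s.pop_value().ok().map(|v| v.as_i32_unchecked()), Some(2));
        assert_eq!(s.pop_value().ok().map(|v| v.as_i32_unchecked()), Some(1));
        assert!(s.assert_empty().is_ok());
    }
}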
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), // TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
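// Call-frame sketch: each `call` builds a fresh `Stack`, seeds the locals with
// the arguments followed by zero-initialised declared locals, and executes the
// body until a `Return` control signal (or falling off the end) yields the
// single popped return value.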
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like the runtime expects the memory size, in bytes, at address 4
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8;
let last_write_address = address + bytes_to_write as u64;
// Check for out of bounds access
if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 {
return None;
}
// Resize internal vector if needed
if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 {
self.bytes.resize((last_write_address + 1) as usize, 0);
}
for i in (address..(address + bytes_to_write as u64)).rev() {
self.bytes[i as usize] = (value & 0xFF) as u8;
value >>= 8;
}
Some(())
}
pub fn read(
&mut self,
result_type: PrimitiveType,
bitwidth: u8,
address: u64,
) -> Option<Value> {
let bytes_to_read = (bitwidth / 8) as u64;
let mut result = 0_u64;
for i in address..(address + bytes_to_read) {
result <<= 8;
result += self.bytes[i as usize] as u64;
}
log::debug!(
"Read from address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
result
);
Some(Value::from_explicit_type(result_type, result))
}
}
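// Round-trip sketch (illustrative test, not part of the original file). Note
// that `write` stores the most significant byte at the lowest address, so this
// memory is big-endian internally; `read` mirrors it, which keeps round-trips
// consistent even though spec-faithful wasm memory is little-endian.
#[cfg(test)]
mod memory_sketch {
    use super::*;

    #[test]
    fn write_then_read() {
        let mut m = Memory::new(1, 1);
        m.write(0xAABB_CCDD, 32, 0x10).unwrap();
        let v = m.read(PrimitiveType::I32, 32, 0x10).unwrap();
        assert_eq!(v.as_i32_unchecked() as u32, 0xAABB_CCDD);
    }
}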
#[derive(Default, Clone)]
pub struct FunctionType {
pub params: Vec<PrimitiveType>,
pub returns: Vec<PrimitiveType>,
}
impl FunctionType {
pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self {
Self { params, returns }
}
pub fn | (&self) -> usize {
self.params.len()
}
pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> {
self.params.iter()
}
}
pub enum Export {
Function(usize),
Table(usize),
Memory(usize),
Global(usize),
}
#[derive(Default)]
pub struct Module {
function_types: Vec<FunctionType>,
functions: Vec<Function>,
exports: HashMap<String, Export>,
table: Table,
memory: Memory,
globals: Vec<Value>,
}
impl Module {
pub fn new() -> Self {
Self::default()
}
pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> {
let function_index = match self.exports.get(function_name) {
Some(Export::Function(n)) => *n,
_ => return Err(Error::Misc("On module call, given name is not a function")),
};
let function = match self.functions.get(function_index) {
Some(n) => n,
None => {
return Err(Error::Misc(
"Function index given by export section is not valid",
))
}
};
function.call(&self.functions, &mut self.memory, args)
}
pub fn add_function_type(&mut self, ft: FunctionType) {
self.function_types.push(ft);
}
pub fn get_function_type(&self, i: usize) -> FunctionType {
self.function_types[i].clone()
}
pub fn add_function(&mut self, f: Function) {
self.functions.push(f);
}
pub fn add_memory(&mut self, m: Memory) {
self.memory = m;
}
pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> {
if self.exports.contains_key(&name) {
return Err(Error::UnexpectedData("Expected a unique export name"));
}
self.exports.insert(name, export);
Ok(())
}
pub fn get_mut_function(&mut self, i: usize) -> &mut Function {
&mut self.functions[i]
}
}
| num_params | identifier_name |
wasm.rs | use std::collections::HashMap;
use std::convert::TryFrom;
use crate::error::Error;
/// The allowable types for any real value in wasm (u8 and others are packed)
#[derive(Copy, Clone, PartialEq)]
pub enum PrimitiveType {
I32,
I64,
F32,
F64,
}
impl From<i32> for PrimitiveType {
fn from(_: i32) -> PrimitiveType {
PrimitiveType::I32
}
}
impl From<i64> for PrimitiveType {
fn from(_: i64) -> PrimitiveType {
PrimitiveType::I64
}
}
impl From<f32> for PrimitiveType {
fn from(_: f32) -> PrimitiveType {
PrimitiveType::F32
}
}
impl From<f64> for PrimitiveType {
fn from(_: f64) -> PrimitiveType {
PrimitiveType::F64
}
}
/// Storage type for all wasm values
#[derive(Copy, Clone)]
pub union InternalValue {
i32: i32,
i64: i64,
f32: f32,
f64: f64,
}
impl From<i32> for InternalValue {
fn from(x: i32) -> InternalValue {
InternalValue { i32: x }
}
}
impl From<i64> for InternalValue {
fn from(x: i64) -> InternalValue {
InternalValue { i64: x }
}
}
impl From<f32> for InternalValue {
fn from(x: f32) -> InternalValue {
InternalValue { f32: x }
}
}
impl From<f64> for InternalValue {
fn from(x: f64) -> InternalValue {
InternalValue { f64: x }
}
}
/// Representation of all wasm values
#[derive(Copy, Clone)]
pub struct Value {
t: PrimitiveType,
v: InternalValue,
}
impl Value {
pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self {
Self {
t: x.into(),
v: x.into(),
}
}
pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value {
Self {
t,
v: InternalValue { i64: v as i64 },
}
}
#[inline]
pub fn as_i32_unchecked(&self) -> i32 {
unsafe { self.v.i32 }
}
#[inline]
pub fn as_i64_unchecked(&self) -> i64 {
unsafe { self.v.i64 }
}
#[inline]
pub fn as_f32_unchecked(&self) -> f32 {
unsafe { self.v.f32 }
}
#[inline]
pub fn as_f64_unchecked(&self) -> f64 {
unsafe { self.v.f64 }
}
}
impl From<i32> for Value {
fn from(v: i32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<i64> for Value {
fn from(v: i64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f32> for Value {
fn from(v: f32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f64> for Value {
fn from(v: f64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl TryFrom<Value> for u32 {
type Error = Error;
fn try_from(x: Value) -> Result<u32, Error> {
match x.t {
PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }),
_ => Err(Error::Misc("Cannot extract as u32 from incorrect type")),
}
}
}
impl From<&PrimitiveType> for Value {
fn from(x: &PrimitiveType) -> Value {
match x {
PrimitiveType::I32 => Value::new(0_i32),
PrimitiveType::I64 => Value::new(0_i64),
PrimitiveType::F32 => Value::new(0_f32),
PrimitiveType::F64 => Value::new(0_f64),
}
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
// Guard against underflow when offset >= stack_size.
let offset_to_fetch = match stack_size.checked_sub(offset + 1) {
Some(n) => n,
None => return Err(Error::StackViolation),
};
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
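// --- Illustrative sketch (not part of the original source): the intended
// stack discipline. pop_value on an empty stack reports a StackViolation,
// and fetch_value(0) peeks at the most recently pushed value.
#[cfg(test)]
mod stack_discipline_sketch {
use super::*;
#[test]
fn push_pop_and_peek() {
let mut stack = Stack::new();
assert!(stack.pop_value().is_err());
stack.push_value(Value::from(1_i32));
stack.push_value(Value::from(2_i32));
// Offset 0 is the top of the stack, offset 1 the value below it.
assert_eq!(stack.fetch_value(0).unwrap().as_i32_unchecked(), 2);
assert_eq!(stack.fetch_value(1).unwrap().as_i32_unchecked(), 1);
assert_eq!(stack.pop_value().unwrap().as_i32_unchecked(), 2);
}
}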
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8;
let last_write_address = address + bytes_to_write as u64;
// Check for out of bounds access
if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 {
return None;
}
// Resize internal vector if needed
if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 {
self.bytes.resize((last_write_address + 1) as usize, 0);
}
for i in (address..(address + bytes_to_write as u64)).rev() {
self.bytes[i as usize] = (value & 0xFF) as u8;
value >>= 8;
}
Some(())
}
pub fn read(
&mut self,
result_type: PrimitiveType,
bitwidth: u8,
address: u64,
) -> Option<Value> {
let bytes_to_read = (bitwidth / 8) as u64;
let mut result = 0_u64;
for i in address..(address + bytes_to_read) {
result <<= 8;
result += self.bytes[i as usize] as u64;
}
log::debug!(
"Read from address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
result
);
Some(Value::from_explicit_type(result_type, result))
}
}
#[derive(Default, Clone)]
pub struct FunctionType {
pub params: Vec<PrimitiveType>,
pub returns: Vec<PrimitiveType>,
}
impl FunctionType {
pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self {
Self { params, returns }
}
pub fn num_params(&self) -> usize {
self.params.len()
}
pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> {
self.params.iter()
}
}
pub enum Export {
Function(usize),
Table(usize),
Memory(usize),
Global(usize),
}
#[derive(Default)]
pub struct Module {
function_types: Vec<FunctionType>,
functions: Vec<Function>,
exports: HashMap<String, Export>,
table: Table,
memory: Memory,
globals: Vec<Value>,
}
impl Module {
pub fn new() -> Self {
Self::default()
}
pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> {
let function_index = match self.exports.get(function_name) {
Some(Export::Function(n)) => *n,
_ => return Err(Error::Misc("On module call, given name is not a function")),
};
let function = match self.functions.get(function_index) {
Some(n) => n,
None => {
return Err(Error::Misc(
"Function index given by export section is not valid",
))
}
};
function.call(&self.functions, &mut self.memory, args)
} | self.function_types.push(ft);
}
pub fn get_function_type(&self, i: usize) -> FunctionType {
self.function_types[i].clone()
}
pub fn add_function(&mut self, f: Function) {
self.functions.push(f);
}
pub fn add_memory(&mut self, m: Memory) {
self.memory = m;
}
pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> {
if self.exports.contains_key(&name) {
return Err(Error::UnexpectedData("Expected a unique export name"));
}
self.exports.insert(name, export);
Ok(())
}
pub fn get_mut_function(&mut self, i: usize) -> &mut Function {
&mut self.functions[i]
}
} |
pub fn add_function_type(&mut self, ft: FunctionType) { | random_line_split |
wasm.rs | use std::collections::HashMap;
use std::convert::TryFrom;
use crate::error::Error;
/// The allowable types for any real value in wasm (u8 and others are packed)
#[derive(Copy, Clone, PartialEq)]
pub enum PrimitiveType {
I32,
I64,
F32,
F64,
}
impl From<i32> for PrimitiveType {
fn from(_: i32) -> PrimitiveType {
PrimitiveType::I32
}
}
impl From<i64> for PrimitiveType {
fn from(_: i64) -> PrimitiveType {
PrimitiveType::I64
}
}
impl From<f32> for PrimitiveType {
fn from(_: f32) -> PrimitiveType {
PrimitiveType::F32
}
}
impl From<f64> for PrimitiveType {
fn from(_: f64) -> PrimitiveType {
PrimitiveType::F64
}
}
/// Storage type for all wasm values
#[derive(Copy, Clone)]
pub union InternalValue {
i32: i32,
i64: i64,
f32: f32,
f64: f64,
}
impl From<i32> for InternalValue {
fn from(x: i32) -> InternalValue {
InternalValue { i32: x }
}
}
impl From<i64> for InternalValue {
fn from(x: i64) -> InternalValue {
InternalValue { i64: x }
}
}
impl From<f32> for InternalValue {
fn from(x: f32) -> InternalValue {
InternalValue { f32: x }
}
}
impl From<f64> for InternalValue {
fn from(x: f64) -> InternalValue {
InternalValue { f64: x }
}
}
/// Representation of all wasm values
#[derive(Copy, Clone)]
pub struct Value {
t: PrimitiveType,
v: InternalValue,
}
impl Value {
pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self {
Self {
t: x.into(),
v: x.into(),
}
}
pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value {
Self {
t,
v: InternalValue { i64: v as i64 },
}
}
#[inline]
pub fn as_i32_unchecked(&self) -> i32 |
#[inline]
pub fn as_i64_unchecked(&self) -> i64 {
unsafe { self.v.i64 }
}
#[inline]
pub fn as_f32_unchecked(&self) -> f32 {
unsafe { self.v.f32 }
}
#[inline]
pub fn as_f64_unchecked(&self) -> f64 {
unsafe { self.v.f64 }
}
}
impl From<i32> for Value {
fn from(v: i32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<i64> for Value {
fn from(v: i64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f32> for Value {
fn from(v: f32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f64> for Value {
fn from(v: f64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl TryFrom<Value> for u32 {
type Error = Error;
fn try_from(x: Value) -> Result<u32, Error> {
match x.t {
PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }),
_ => Err(Error::Misc("Cannot extract as u32 from incorrect type")),
}
}
}
impl From<&PrimitiveType> for Value {
fn from(x: &PrimitiveType) -> Value {
match x {
PrimitiveType::I32 => Value::new(0_i32),
PrimitiveType::I64 => Value::new(0_i64),
PrimitiveType::F32 => Value::new(0_f32),
PrimitiveType::F64 => Value::new(0_f64),
}
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
// Guard against underflow when offset >= stack_size.
let offset_to_fetch = match stack_size.checked_sub(offset + 1) {
Some(n) => n,
None => return Err(Error::StackViolation),
};
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8;
let last_write_address = address + bytes_to_write as u64;
// Check for out of bounds access
if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 {
return None;
}
// Resize internal vector if needed
if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 {
self.bytes.resize((last_write_address + 1) as usize, 0);
}
for i in (address..(address + bytes_to_write as u64)).rev() {
self.bytes[i as usize] = (value & 0xFF) as u8;
value >>= 8;
}
Some(())
}
pub fn read(
&mut self,
result_type: PrimitiveType,
bitwidth: u8,
address: u64,
) -> Option<Value> {
let bytes_to_read = (bitwidth / 8) as u64;
let mut result = 0_u64;
for i in address..(address + bytes_to_read) {
result <<= 8;
result += self.bytes[i as usize] as u64;
}
log::debug!(
"Read from address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
result
);
Some(Value::from_explicit_type(result_type, result))
}
}
#[derive(Default, Clone)]
pub struct FunctionType {
pub params: Vec<PrimitiveType>,
pub returns: Vec<PrimitiveType>,
}
impl FunctionType {
pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self {
Self { params, returns }
}
pub fn num_params(&self) -> usize {
self.params.len()
}
pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> {
self.params.iter()
}
}
pub enum Export {
Function(usize),
Table(usize),
Memory(usize),
Global(usize),
}
#[derive(Default)]
pub struct Module {
function_types: Vec<FunctionType>,
functions: Vec<Function>,
exports: HashMap<String, Export>,
table: Table,
memory: Memory,
globals: Vec<Value>,
}
impl Module {
pub fn new() -> Self {
Self::default()
}
pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> {
let function_index = match self.exports.get(function_name) {
Some(Export::Function(n)) => *n,
_ => return Err(Error::Misc("On module call, given name is not a function")),
};
let function = match self.functions.get(function_index) {
Some(n) => n,
None => {
return Err(Error::Misc(
"Function index given by export section is not valid",
))
}
};
function.call(&self.functions, &mut self.memory, args)
}
pub fn add_function_type(&mut self, ft: FunctionType) {
self.function_types.push(ft);
}
pub fn get_function_type(&self, i: usize) -> FunctionType {
self.function_types[i].clone()
}
pub fn add_function(&mut self, f: Function) {
self.functions.push(f);
}
pub fn add_memory(&mut self, m: Memory) {
self.memory = m;
}
pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> {
if self.exports.contains_key(&name) {
return Err(Error::UnexpectedData("Expected a unique export name"));
}
self.exports.insert(name, export);
Ok(())
}
pub fn get_mut_function(&mut self, i: usize) -> &mut Function {
&mut self.functions[i]
}
}
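// --- Illustrative sketch (not part of the original source): the intended
// entry point once a module has been populated via the add_* methods.
// `parse_wasm_binary` is a hypothetical loader, not an API of this file.
//
// let mut module: Module = parse_wasm_binary(&bytes)?;
// let result = module.call("add", vec![Value::from(2_i32), Value::from(3_i32)])?;
// println!("{}", result); // the Display impl prints e.g. "(i32:5)"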
| {
unsafe { self.v.i32 }
} | identifier_body |
cargo_test.rs | use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput};
use crate::core::shell::Verbosity;
use crate::core::{TargetKind, Workspace};
use crate::ops;
use crate::util::errors::CargoResult;
use crate::util::{add_path_args, CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() | else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
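// --- Illustrative sketch (assumption): how a frontend such as `cargo test`
// consumes run_tests. `ws`, `opts` and the filter come from CLI parsing,
// which lives outside this file.
//
// match ops::run_tests(&ws, &opts, &["some_filter"])? {
//     None => {} // everything passed (or --no-run was given)
//     Some(err) => return Err(err.into()), // at least one test binary failed
// }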
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() {
0 => Ok(None),
_ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
fn run_doc_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
.display();
let exe_display = match unit.target.kind() {
TargetKind::Test | TargetKind::Bench => format!(
"{} ({})",
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
_ => format!(
"{} {} ({})",
exec_type,
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
};
let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?;
cmd.args(test_args);
if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet {
cmd.arg("--quiet");
}
Ok((exe_display, cmd))
}
| { test } | conditional_block |
cargo_test.rs | use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput};
use crate::core::shell::Verbosity;
use crate::core::{TargetKind, Workspace};
use crate::ops;
use crate::util::errors::CargoResult;
use crate::util::{add_path_args, CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() { test } else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() { | _ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
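// --- Illustrative note (not part of the original source): the shape of the
// aggregated result above. A single failing binary is reported precisely as
// Test::UnitTest { kind, name, pkg_name }, while two or more failures
// collapse into Test::Multiple and only the ProcessErrors are kept:
//
// match run_unit_tests(config, &opts, &[], &compilation)? {
//     (Test::UnitTest { name, .. }, errs) => eprintln!("{} failed ({} error)", name, errs.len()),
//     (Test::Multiple, errs) => eprintln!("{} binaries failed", errs.len()),
//     _ => {}
// }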
fn run_doc_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
.display();
let exe_display = match unit.target.kind() {
TargetKind::Test | TargetKind::Bench => format!(
"{} ({})",
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
_ => format!(
"{} {} ({})",
exec_type,
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
};
let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?;
cmd.args(test_args);
if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet {
cmd.arg("--quiet");
}
Ok((exe_display, cmd))
} | 0 => Ok(None), | random_line_split |
cargo_test.rs | use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput};
use crate::core::shell::Verbosity;
use crate::core::{TargetKind, Workspace};
use crate::ops;
use crate::util::errors::CargoResult;
use crate::util::{add_path_args, CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() { test } else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() {
0 => Ok(None),
_ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
fn | (
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
.display();
let exe_display = match unit.target.kind() {
TargetKind::Test | TargetKind::Bench => format!(
"{} ({})",
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
_ => format!(
"{} {} ({})",
exec_type,
short_test_path,
path.strip_prefix(cwd).unwrap_or(path).display()
),
};
let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?;
cmd.args(test_args);
if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet {
cmd.arg("--quiet");
}
Ok((exe_display, cmd))
}
| run_doc_tests | identifier_name |
lib.rs | 32),
// width and height of character in texture units
tex_size: (f32, f32),
// size of the character in EMs
size: (f32, f32),
// number of EMs between the bottom of the character and the base line of text
height_over_line: f32,
// number of EMs at the left of the character
left_padding: f32,
// number of EMs at the right of the character
right_padding: f32,
}
struct TextureData {
data: Vec<f32>,
width: u32,
height: u32,
}
impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData {
type Data = f32;
fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> {
glium::texture::RawImage2d {
data: Cow::Borrowed(&self.data),
width: self.width,
height: self.height,
format: glium::texture::ClientFormat::F32,
}
}
}
#[derive(Copy, Clone)]
struct VertexFormat {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(VertexFormat, position, tex_coords);
impl FontTexture {
/// Creates a new texture representing a font stored in a `FontTexture`.
pub fn new<R, F>(facade: &F, font: R, font_size: u32)
-> Result<FontTexture, ()> where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = 0; // immediately overwritten by FT_Get_First_Char below
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap(); | texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
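// --- Illustrative sketch (not part of the original source): a pixel-perfect
// transform built from em_pixels(). The text mesh is in em units, so scaling
// by em_pixels maps it to physical pixels, and 2.0 / screen size then maps
// pixels to OpenGL's [-1, 1] clip space. `screen_w` and `screen_h` are
// assumed f32 window dimensions; the baseline lands at the bottom-left.
//
// let em = font.em_pixels() as f32;
// let matrix = [
//     [2.0 * em / screen_w, 0.0, 0.0, 0.0],
//     [0.0, 2.0 * em / screen_h, 0.0, 0.0],
//     [0.0, 0.0, 1.0, 0.0],
//     [-1.0, -1.0, 0.0, 1.0],
// ];
// draw(&text, &system, &mut target, matrix, (1.0, 1.0, 1.0, 1.0));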
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords));
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords));
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character.
/// The last value of the array is the x-pos of the end of the string
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
//
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.is_empty() {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
}
}
}
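// --- Illustrative sketch (assumption): using get_char_pos_x() to place a
// caret after `byte_index` bytes of the displayed string.
//
// let display = TextDisplay::new(&system, &font, "hello");
// let caret_em = display.get_char_pos_x()[byte_index]; // x-position in em units
// let caret_px = caret_em * font.em_pixels() as f32; // converted to pixels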
///
/// ## About the matrix
///
/// The matrix must be column-major post-multiplying (which is the usual convention in OpenGL).
///
/// One unit in height corresponds to a line of text, but the text can extend above or below it.
/// The bottom of the line is at `0.0`, the top is at `1.0`.
/// You need to adapt your matrix by taking these into consideration.
pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S,
matrix: M, color: (f32, f32, f32, f32))
where S: glium::Surface, M: Into<[[f32; 4]; 4]>,
F: Deref<Target=FontTexture>
{
let matrix = matrix.into();
let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty,.. } = text;
let color = [color.0, color.1, color.2, color.3];
// returning if nothing to draw
if is_empty || vertex_buffer.is_none() || index_buffer.is_none() {
return;
}
let vertex_buffer = vertex_buffer.as_ref().unwrap();
let index_buffer = index_buffer.as_ref().unwrap();
let uniforms = uniform! {
matrix: matrix,
color: color,
tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior {
magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
.. Default::default()
})
};
let params = {
use glium::BlendingFunction::Addition;
use glium::LinearBlendingFactor::*;
let blending_function = Addition {
source: SourceAlpha,
destination: OneMinusSourceAlpha
};
let blend = glium::Blend {
color: blending_function,
alpha: blending_function,
constant_value: (1.0, 1.0, 1.0, 1.0),
};
DrawParameters {
blend: blend,
.. Default::default()
}
};
target.draw(vertex_buffer, index_buffer, &system.program, &uniforms,
¶ms).unwrap();
}
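// --- Illustrative note (not part of the original source): because one unit
// of height corresponds to one line of text, line `i` of a paragraph can be
// drawn by adding a translation of -i units in y to the matrix, applied in
// em space before the scale to clip space.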
unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32)
-> (TextureData, Vec<(char, CharacterInfos)>, u32)
{
use std::iter;
// a margin around each character to prevent artifacts
const MARGIN: u32 = 2;
// setting the right pixel size
if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 {
panic!();
}
// this variable will store the texture data
// we set an arbitrary capacity that we think will match what we will need
let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() *
font_size as usize * font_size as usize);
// the width is chosen more or less arbitrarily, because we can store everything as long as
// the texture is at least as wide as the widest character
// we just try to estimate a width so that width ~= height
let texture_width = get_nearest_po2(std::cmp::max(font_size * 2 as u32,
((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32));
// we store the position of the "cursor" in the destination texture
// this cursor points to the top-left pixel of the next character to write on the texture
let mut cursor_offset = (0u32, 0u32);
// number of rows to skip at next carriage return
let mut rows_to_skip = 0u32;
// now looping through the list of characters, filling the texture and returning the informations
let mut em_pixels = font_size;
let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| {
// loading wanted glyph in the font face
if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 {
return None;
}
let bitmap = &(*(*face).glyph).bitmap;
// adding a left margin before our character to prevent artifacts
cursor_offset.0 += MARGIN;
// computing em_pixels
// FIXME: this is hacky
if character == 'M' {
// println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize));
em_pixels = bitmap.rows as u32;
}
|
Ok(FontTexture { | random_line_split |
lib.rs | ),
// width and height of character in texture units
tex_size: (f32, f32),
// size of the character in EMs
size: (f32, f32),
// number of EMs between the bottom of the character and the base line of text
height_over_line: f32,
// number of EMs at the left of the character
left_padding: f32,
// number of EMs at the right of the character
right_padding: f32,
}
struct TextureData {
data: Vec<f32>,
width: u32,
height: u32,
}
impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData {
type Data = f32;
fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> {
glium::texture::RawImage2d {
data: Cow::Borrowed(&self.data),
width: self.width,
height: self.height,
format: glium::texture::ClientFormat::F32,
}
}
}
#[derive(Copy, Clone)]
struct VertexFormat {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(VertexFormat, position, tex_coords);
impl FontTexture {
/// Creates a new texture representing a font stored in a `FontTexture`.
pub fn new<R, F>(facade: &F, font: R, font_size: u32)
-> Result<FontTexture, ()> where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE)!= 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
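// Hedged sketch (added for illustration; `screen_width`/`screen_height` are
// assumptions): for pixel-perfect rendering, scale the em-unit geometry by
// `em_pixels()` and map it into normalized device coordinates, so that one
// texture pixel covers exactly one screen pixel.
#[allow(dead_code)]
fn pixel_perfect_scale(font: &FontTexture, screen_width: f32, screen_height: f32) -> (f32, f32) {
    let em = font.em_pixels() as f32;
    // NDC spans 2.0 units across the whole viewport in each axis
    (2.0 * em / screen_width, 2.0 * em / screen_height)
}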
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords).r);
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords).r);
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> |
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at the n-th byte, get_char_pos_x()[n] is the x-position of that character.
/// The last value of the array is the x-position of the end of the string.
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
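/// Hedged helper sketch (added for illustration, not in the original API):
/// since `char_pos_x` is indexed by byte offset, the caret x-position for a
/// cursor sitting at byte `n` of the displayed string is `char_pos_x[n]`.
pub fn caret_x_at_byte(&self, byte: usize) -> f32 {
    let positions = self.get_char_pos_x();
    positions[byte.min(positions.len() - 1)] // clamp to the end of the string
}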
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
// advancing the cursor by the left-side bearing of the character
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.is_empty() {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
}
}
}
///
/// ## About the matrix
///
/// The matrix must be column-major post-multiplying (which is the usual way to do it in OpenGL).
///
/// One unit in height corresponds to a line of text, but the text can extend above or below it.
/// The bottom of the line is at `0.0`, the top is at `1.0`.
/// You need to adapt your matrix by taking these into consideration.
pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S,
matrix: M, color: (f32, f32, f32, f32))
where S: glium::Surface, M: Into<[[f32; 4]; 4]>,
F: Deref<Target=FontTexture>
{
let matrix = matrix.into();
let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text;
let color = [color.0, color.1, color.2, color.3];
// returning if nothing to draw
if is_empty || vertex_buffer.is_none() || index_buffer.is_none() {
return;
}
let vertex_buffer = vertex_buffer.as_ref().unwrap();
let index_buffer = index_buffer.as_ref().unwrap();
let uniforms = uniform! {
matrix: matrix,
color: color,
tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior {
magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
.. Default::default()
})
};
let params = {
use glium::BlendingFunction::Addition;
use glium::LinearBlendingFactor::*;
let blending_function = Addition {
source: SourceAlpha,
destination: OneMinusSourceAlpha
};
let blend = glium::Blend {
color: blending_function,
alpha: blending_function,
constant_value: (1.0, 1.0, 1.0, 1.0),
};
DrawParameters {
blend: blend,
.. Default::default()
}
};
target.draw(vertex_buffer, index_buffer, &system.program, &uniforms,
&params).unwrap();
}
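// Hedged helper sketch (added for illustration, not part of the original
// API): a column-major matrix for `draw` that renders one line of text `h`
// units tall, with the bottom of the line placed at `(x, y)`, following the
// convention documented above.
#[allow(dead_code)]
fn line_matrix(x: f32, y: f32, h: f32) -> [[f32; 4]; 4] {
    [
        [h, 0.0, 0.0, 0.0],
        [0.0, h, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [x, y, 0.0, 1.0], // translation lives in the last column
    ]
}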
unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32)
-> (TextureData, Vec<(char, CharacterInfos)>, u32)
{
use std::iter;
// a margin around each character to prevent artifacts
const MARGIN: u32 = 2;
// setting the right pixel size
if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 {
panic!();
}
// this variable will store the texture data
// we set an arbitrary capacity that we think will match what we will need
let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() *
font_size as usize * font_size as usize);
// the width is chosen more or less arbitrarily, because we can store everything as long as
// the texture is at least as wide as the widest character
// we just try to estimate a width so that width ~= height
let texture_width = get_nearest_po2(std::cmp::max(font_size * 2,
((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32));
// we store the position of the "cursor" in the destination texture
// this cursor points to the top-left pixel of the next character to write on the texture
let mut cursor_offset = (0u32, 0u32);
// number of rows to skip at next carriage return
let mut rows_to_skip = 0u32;
// now looping through the list of characters, filling the texture and returning the information
let mut em_pixels = font_size;
let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| {
// loading wanted glyph in the font face
if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 {
return None;
}
let bitmap = &(*(*face).glyph).bitmap;
// adding a left margin before our character to prevent artifacts
cursor_offset.0 += MARGIN;
// computing em_pixels
// FIXME: this is hacky
if character == 'M' {
// println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize));
em_pixels = bitmap.rows as u32;
}
| {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
} | identifier_body |
lib.rs | ),
// width and height of character in texture units
tex_size: (f32, f32),
// size of the character in EMs
size: (f32, f32),
// number of EMs between the bottom of the character and the base line of text
height_over_line: f32,
// number of EMs at the left of the character
left_padding: f32,
// number of EMs at the right of the character
right_padding: f32,
}
struct TextureData {
data: Vec<f32>,
width: u32,
height: u32,
}
impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData {
type Data = f32;
fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> {
glium::texture::RawImage2d {
data: Cow::Borrowed(&self.data),
width: self.width,
height: self.height,
format: glium::texture::ClientFormat::F32,
}
}
}
#[derive(Copy, Clone)]
struct VertexFormat {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(VertexFormat, position, tex_coords);
impl FontTexture {
/// Creates a new texture representing a font stored in a `FontTexture`.
pub fn new<R, F>(facade: &F, font: R, font_size: u32)
-> Result<FontTexture, ()> where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE)!= 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords).r);
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords).r);
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at the n-th byte, get_char_pos_x()[n] is the x-position of that character.
/// The last value of the array is the x-position of the end of the string.
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
// advancing the cursor by the left-side bearing of the character
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.is_empty() {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
}
}
}
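// Hedged end-to-end sketch (added for illustration; the caller supplies raw
// TTF bytes, e.g. read from disk): typical wiring of `FontTexture`,
// `TextSystem` and `TextDisplay` as defined in this file.
#[allow(dead_code)]
fn wiring_example<F: Facade>(facade: &F, font_data: &[u8]) {
    let font = std::rc::Rc::new(FontTexture::new(facade, font_data, 24).unwrap());
    let system = TextSystem::new(facade);
    let text = TextDisplay::new(&system, font, "Hello world!");
    // total advance of the string, in em-units
    let _width = *text.get_char_pos_x().last().unwrap();
}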
///
/// ## About the matrix
///
/// The matrix must be column-major post-multiplying (which is the usual way to do it in OpenGL).
///
/// One unit in height corresponds to a line of text, but the text can extend above or below it.
/// The bottom of the line is at `0.0`, the top is at `1.0`.
/// You need to adapt your matrix by taking these into consideration.
pub fn | <F, S:?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S,
matrix: M, color: (f32, f32, f32, f32))
where S: glium::Surface, M: Into<[[f32; 4]; 4]>,
F: Deref<Target=FontTexture>
{
let matrix = matrix.into();
let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text;
let color = [color.0, color.1, color.2, color.3];
// returning if nothing to draw
if is_empty || vertex_buffer.is_none() || index_buffer.is_none() {
return;
}
let vertex_buffer = vertex_buffer.as_ref().unwrap();
let index_buffer = index_buffer.as_ref().unwrap();
let uniforms = uniform! {
matrix: matrix,
color: color,
tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior {
magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
.. Default::default()
})
};
let params = {
use glium::BlendingFunction::Addition;
use glium::LinearBlendingFactor::*;
let blending_function = Addition {
source: SourceAlpha,
destination: OneMinusSourceAlpha
};
let blend = glium::Blend {
color: blending_function,
alpha: blending_function,
constant_value: (1.0, 1.0, 1.0, 1.0),
};
DrawParameters {
blend: blend,
.. Default::default()
}
};
target.draw(vertex_buffer, index_buffer, &system.program, &uniforms,
&params).unwrap();
}
unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32)
-> (TextureData, Vec<(char, CharacterInfos)>, u32)
{
use std::iter;
// a margin around each character to prevent artifacts
const MARGIN: u32 = 2;
// setting the right pixel size
if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 {
panic!();
}
// this variable will store the texture data
// we set an arbitrary capacity that we think will match what we will need
let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() *
font_size as usize * font_size as usize);
// the width is chosen more or less arbitrarily, because we can store everything as long as
// the texture is at least as wide as the widest character
// we just try to estimate a width so that width ~= height
let texture_width = get_nearest_po2(std::cmp::max(font_size * 2,
((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32));
// we store the position of the "cursor" in the destination texture
// this cursor points to the top-left pixel of the next character to write on the texture
let mut cursor_offset = (0u32, 0u32);
// number of rows to skip at next carriage return
let mut rows_to_skip = 0u32;
// now looping through the list of characters, filling the texture and returning the information
let mut em_pixels = font_size;
let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| {
// loading wanted glyph in the font face
if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 {
return None;
}
let bitmap = &(*(*face).glyph).bitmap;
// adding a left margin before our character to prevent artifacts
cursor_offset.0 += MARGIN;
// computing em_pixels
// FIXME: this is hacky
if character == 'M' {
// println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize));
em_pixels = bitmap.rows as u32;
}
| draw | identifier_name |
lib.rs | ),
// width and height of character in texture units
tex_size: (f32, f32),
// size of the character in EMs
size: (f32, f32),
// number of EMs between the bottom of the character and the base line of text
height_over_line: f32,
// number of EMs at the left of the character
left_padding: f32,
// number of EMs at the right of the character
right_padding: f32,
}
struct TextureData {
data: Vec<f32>,
width: u32,
height: u32,
}
impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData {
type Data = f32;
fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> {
glium::texture::RawImage2d {
data: Cow::Borrowed(&self.data),
width: self.width,
height: self.height,
format: glium::texture::ClientFormat::F32,
}
}
}
#[derive(Copy, Clone)]
struct VertexFormat {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(VertexFormat, position, tex_coords);
impl FontTexture {
/// Creates a new texture representing a font stored in a `FontTexture`.
pub fn new<R, F>(facade: &F, font: R, font_size: u32)
-> Result<FontTexture, ()> where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE)!= 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords).r);
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords).r);
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at the n-th byte, get_char_pos_x()[n] is the x-position of that character.
/// The last value of the array is the x-position of the end of the string.
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
// advancing the cursor by the left-side bearing of the character
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.is_empty() |
}
}
///
/// ## About the matrix
///
/// The matrix must be column-major post-multiplying (which is the usual way to do it in OpenGL).
///
/// One unit in height corresponds to a line of text, but the text can extend above or below it.
/// The bottom of the line is at `0.0`, the top is at `1.0`.
/// You need to adapt your matrix by taking these into consideration.
pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S,
matrix: M, color: (f32, f32, f32, f32))
where S: glium::Surface, M: Into<[[f32; 4]; 4]>,
F: Deref<Target=FontTexture>
{
let matrix = matrix.into();
let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text;
let color = [color.0, color.1, color.2, color.3];
// returning if nothing to draw
if is_empty || vertex_buffer.is_none() || index_buffer.is_none() {
return;
}
let vertex_buffer = vertex_buffer.as_ref().unwrap();
let index_buffer = index_buffer.as_ref().unwrap();
let uniforms = uniform! {
matrix: matrix,
color: color,
tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior {
magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
.. Default::default()
})
};
let params = {
use glium::BlendingFunction::Addition;
use glium::LinearBlendingFactor::*;
let blending_function = Addition {
source: SourceAlpha,
destination: OneMinusSourceAlpha
};
let blend = glium::Blend {
color: blending_function,
alpha: blending_function,
constant_value: (1.0, 1.0, 1.0, 1.0),
};
DrawParameters {
blend: blend,
.. Default::default()
}
};
target.draw(vertex_buffer, index_buffer, &system.program, &uniforms,
&params).unwrap();
}
unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32)
-> (TextureData, Vec<(char, CharacterInfos)>, u32)
{
use std::iter;
// a margin around each character to prevent artifacts
const MARGIN: u32 = 2;
// setting the right pixel size
if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 {
panic!();
}
// this variable will store the texture data
// we set an arbitrary capacity that we think will match what we will need
let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() *
font_size as usize * font_size as usize);
// the width is chosen more or less arbitrarily, because we can store everything as long as
// the texture is at least as wide as the widest character
// we just try to estimate a width so that width ~= height
let texture_width = get_nearest_po2(std::cmp::max(font_size * 2,
((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32));
// we store the position of the "cursor" in the destination texture
// this cursor points to the top-left pixel of the next character to write on the texture
let mut cursor_offset = (0u32, 0u32);
// number of rows to skip at next carriage return
let mut rows_to_skip = 0u32;
// now looping through the list of characters, filling the texture and returning the information
let mut em_pixels = font_size;
let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| {
// loading wanted glyph in the font face
if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 {
return None;
}
let bitmap = &(*(*face).glyph).bitmap;
// adding a left margin before our character to prevent artifacts
cursor_offset.0 += MARGIN;
// computing em_pixels
// FIXME: this is hacky
if character == 'M' {
// println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize));
em_pixels = bitmap.rows as u32;
}
| {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
} | conditional_block |
lib.rs | //! # MCAI Worker SDK
//!
//! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html).
//! It's used for every worker as an abstraction.
//! It manages requirements, message parsing, and direct messaging itself.
//!
//! ## Worker implementation
//!
//! 1. Create a Rust project
//! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"`
//! 3. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait,
//! and call the [`start_worker`](fn.start_worker.html) to start the worker itself.
//!
//! ```rust
//! use mcai_worker_sdk::{
//! MessageEvent,
//! Version,
//! worker::Parameter,
//! };
//! use serde_derive::Deserialize;
//! use schemars::JsonSchema;
//!
//! #[derive(Debug)]
//! struct WorkerNameEvent {}
//!
//! #[derive(Debug, Deserialize, JsonSchema)]
//! struct WorkerParameters {}
//!
//! impl MessageEvent<WorkerParameters> for WorkerNameEvent {
//! fn get_name(&self) -> String {"sample_worker".to_string()}
//! fn get_short_description(&self) -> String {"Short description".to_string()}
//! fn get_description(&self) -> String {"Long description".to_string()}
//! fn get_version(&self) -> Version { Version::new(0, 0, 1) }
//! }
//! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {};
//!
//! // uncomment it to start the worker
//! // fn main() {
//! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT);
//! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process messages for different purposes (functional tests, message order examples, etc.).
//!
//! To start the worker in this mode, set the environment variable `SOURCE_ORDERS` to the path(s) of JSON orders.
//! It can take multiple orders, joined with `:` on Unix platforms and `;` on Windows.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
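// Hedged usage sketch (added for illustration; `Summary` is a hypothetical
// type): a media worker emits intermediate results as JSON or XML payloads
// and signals completion with `ProcessResult::end_of_process()`.
#[cfg(feature = "media")]
#[allow(dead_code)]
fn process_result_example() -> ProcessResult {
    #[derive(Serialize)]
    struct Summary {
        frames_analyzed: u64,
    }
    // the payload is serialized to a JSON string by `new_json`
    ProcessResult::new_json(Summary { frames_analyzed: 42 })
}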
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
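// Hedged sketch (added for illustration; `EchoWorker` and `EchoParameters`
// are hypothetical names, not part of the SDK): a minimal non-media worker
// implements the metadata getters and overrides `process`, handing the
// `JobResult` back once its work is done.
#[cfg(test)]
mod echo_worker_example {
    use super::*;
    #[derive(Debug)]
    struct EchoWorker {}
    #[derive(JsonSchema, Deserialize)]
    struct EchoParameters {}
    impl MessageEvent<EchoParameters> for EchoWorker {
        fn get_name(&self) -> String {
            "echo".to_string()
        }
        fn get_short_description(&self) -> String {
            "Returns jobs unchanged".to_string()
        }
        fn get_description(&self) -> String {
            "Example worker that completes every job without doing any work".to_string()
        }
        fn get_version(&self) -> semver::Version {
            semver::Version::new(0, 1, 0)
        }
        fn process(
            &self,
            _channel: Option<McaiChannel>,
            _parameters: EchoParameters,
            job_result: JobResult,
        ) -> Result<JobResult> {
            // a real worker would do its processing here before returning
            Ok(job_result)
        }
    }
}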
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => |
Err(message) => {
error!("{:?}", message);
}
}
}
return;
}
loop {
let amqp_uri = get_amqp_uri();
let mut executor = LocalPool::new();
let spawner = executor.spawner();
executor.run_until(async {
let conn = Connection::connect_uri(
amqp_uri,
ConnectionProperties::default().with_default_executor(8),
)
.wait()
.unwrap();
info!("Connected");
let channel = Arc::new(channels::declare_consumer_channel(
&conn,
&worker_configuration,
));
let consumer = channel
.clone()
.basic_consume(
&amqp_queue,
"amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_consumer = channel
.clone()
.basic_consume(
&worker_configuration.get_direct_messaging_queue_name(),
"status_amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_response_channel = channel.clone();
let status_worker_configuration = worker_configuration.clone();
let _consumer = spawner.spawn_local(async move {
status_consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in consumer");
worker::system_information::send_real_time_information(
delivery,
&status_response_channel,
&status_worker_configuration,
)
.map(|_| ())
})
.await
});
info!("Start to consume on queue {:?}", amqp_queue);
let clone_channel = channel.clone();
let message_event = message_event_ref.clone();
consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in consumer");
message::process_message(message_event.clone(), delivery, clone_channel.clone())
.map(|_| ())
})
.await
});
let sleep_duration = time::Duration::new(1, 0);
thread::sleep(sleep_duration);
info!("Reconnection...");
}
}
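// Hedged sketch (added for illustration; `MyWorker` is a hypothetical type
// implementing `MessageEvent`): running the worker against local JSON orders
// instead of RabbitMQ, as described in the module documentation above.
// fn main() {
//     std::env::set_var("SOURCE_ORDERS", "./examples/success_order.json");
//     mcai_worker_sdk::start_worker(MyWorker {});
// }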
#[test]
fn empty_message_event_impl() {
#[derive(Debug)]
struct CustomEvent {}
#[derive(JsonSchema, Deserialize)]
struct CustomParameters {}
impl MessageEvent<CustomParameters> for CustomEvent {
fn get_name(&self) -> String {
"custom".to_string()
}
fn get_short_description(&self) -> String {
"short description".to_string()
}
fn get_description(&self) -> String {
"long description".to_string()
}
fn get_version(&self) -> semver::Version {
semver::Version::new(1, 2, 3)
}
}
let custom_event = CustomEvent {};
let parameters = CustomParameters {};
let job = job::Job {
job_id: 1234,
parameters: vec![],
};
let job_result = job::JobResult::new(job.job_id);
let result = custom_event.process(None, parameters, job_result);
assert!(result == Err(MessageError::NotImplemented()));
}
| {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
} | conditional_block |
lib.rs | //! # MCAI Worker SDK
//!
//! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html).
//! It's used for every worker as an abstraction.
//! It manages requirements, message parsing, and direct messaging itself.
//!
//! ## Worker implementation
//!
//! 1. Create a Rust project
//! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"`
//! 3. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait,
//! and call the [`start_worker`](fn.start_worker.html) to start the worker itself.
//!
//! ```rust
//! use mcai_worker_sdk::{
//! MessageEvent,
//! Version,
//! worker::Parameter,
//! };
//! use serde_derive::Deserialize;
//! use schemars::JsonSchema;
//!
//! #[derive(Debug)]
//! struct WorkerNameEvent {}
//!
//! #[derive(Debug, Deserialize, JsonSchema)]
//! struct WorkerParameters {}
//!
//! impl MessageEvent<WorkerParameters> for WorkerNameEvent {
//! fn get_name(&self) -> String {"sample_worker".to_string()}
//! fn get_short_description(&self) -> String {"Short description".to_string()}
//! fn get_description(&self) -> String {"Long description".to_string()}
//! fn get_version(&self) -> Version { Version::new(0, 0, 1) }
//! }
//! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {};
//!
//! // uncomment it to start the worker
//! // fn main() { | //! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process messages for different purposes (functional tests, message order examples, etc.).
//!
//! To start the worker in this mode, set the environment variable `SOURCE_ORDERS` to the path(s) of JSON orders.
//! It can take multiple orders, joined with `:` on Unix platforms and `;` on Windows.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
}
Err(message) => {
error!("{:?}", message);
}
}
}
return;
}
loop {
let amqp_uri = get_amqp_uri();
let mut executor = LocalPool::new();
let spawner = executor.spawner();
executor.run_until(async {
let conn = Connection::connect_uri(
amqp_uri,
ConnectionProperties::default().with_default_executor(8),
)
.wait()
.unwrap();
info!("Connected");
let channel = Arc::new(channels::declare_consumer_channel(
&conn,
&worker_configuration,
));
let consumer = channel
.clone()
.basic_consume(
&amqp_queue,
"amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_consumer = channel
.clone()
.basic_consume(
&worker_configuration.get_direct_messaging_queue_name(),
"status_amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_response_channel = channel.clone();
let status_worker_configuration = worker_configuration.clone();
let _consumer = spawner.spawn_local(async move {
status_consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in consumer");
worker::system_information::send_real_time_information(
delivery,
&status_response_channel,
&status_worker_configuration,
)
.map(|_| ())
})
.await
});
info!("Start to consume on queue {:?}", amqp_queue);
let clone_channel = channel.clone();
let message_event = message_event_ref.clone();
consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in consumer");
message::process_message(message_event.clone(), delivery, clone_channel.clone())
.map(|_| ())
})
.await
});
let sleep_duration = time::Duration::new(1, 0);
thread::sleep(sleep_duration);
info!("Reconnection...");
}
}
#[test]
fn empty_message_event_impl() {
#[derive(Debug)]
struct CustomEvent {}
#[derive(JsonSchema, Deserialize)]
struct CustomParameters {}
impl MessageEvent<CustomParameters> for CustomEvent {
fn get_name(&self) -> String {
"custom".to_string()
}
fn get_short_description(&self) -> String {
"short description".to_string()
}
fn get_description(&self) -> String {
"long description".to_string()
}
fn get_version(&self) -> semver::Version {
semver::Version::new(1, 2, 3)
}
}
let custom_event = CustomEvent {};
let parameters = CustomParameters {};
let job = job::Job {
job_id: 1234,
parameters: vec![],
};
let job_result = job::JobResult::new(job.job_id);
let result = custom_event.process(None, parameters, job_result);
assert!(result == Err(MessageError::NotImplemented()));
}
lib.rs | //! # MCAI Worker SDK
//!
//! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html).
//! It's used for every worker as an abstraction.
//! It manages requirements, message parsing and direct messaging by itself.
//!
//! ## Worker implementation
//!
//! 1. Create a Rust project
//! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"`
//! 3. Update the main file with the example provided here to implement the [MessageEvent](trait.MessageEvent.html) trait,
//! and call the [`start_worker`](fn.start_worker.html) to start the worker itself.
//!
//! ```rust
//! use mcai_worker_sdk::{
//! MessageEvent,
//! Version,
//! worker::Parameter,
//! };
//! use serde_derive::Deserialize;
//! use schemars::JsonSchema;
//!
//! #[derive(Debug)]
//! struct WorkerNameEvent {}
//!
//! #[derive(Debug, Deserialize, JsonSchema)]
//! struct WorkerParameters {}
//!
//! impl MessageEvent<WorkerParameters> for WorkerNameEvent {
//! fn get_name(&self) -> String {"sample_worker".to_string()}
//! fn get_short_description(&self) -> String {"Short description".to_string()}
//! fn get_description(&self) -> String {"Long description".to_string()}
//! fn get_version(&self) -> Version { Version::new(0, 0, 1) }
//! }
//! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {};
//!
//! // uncomment it to start the worker
//! // fn main() {
//! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT);
//! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
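//! As an illustration, a deployment might point a worker at its own RabbitMQ
//! instance like this (hostname, credentials and queue below are placeholders,
//! not values required by the SDK):
//!
//! ```bash
//! export AMQP_HOSTNAME=rabbitmq.internal
//! export AMQP_USERNAME=worker
//! export AMQP_PASSWORD=secret
//! export AMQP_QUEUE=job_transcode
//! ```
//!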
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process messages for different purposes (functional tests, message order examples, etc.).
//!
//! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders.
//! It can take multiple orders, joined with `:` on Unix platforms and `;` on Windows.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
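// A minimal usage sketch (test-only, assuming the "media" feature): any value
// implementing serde's Serialize can be wrapped into a JSON ProcessResult.
#[cfg(all(test, feature = "media"))]
#[test]
fn process_result_json_sketch() {
    let result = ProcessResult::new_json(json!({ "frame": 42, "label": "ok" }));
    assert!(!result.end_of_process);
    assert!(result.json_content.is_some());
}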
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
        // improvement: also track a PTS for these variants so processing can terminate correctly
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
    fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
}
Err(message) => {
error!("{:?}", message);
}
}
}
return;
}
loop {
let amqp_uri = get_amqp_uri();
let mut executor = LocalPool::new();
let spawner = executor.spawner();
executor.run_until(async {
let conn = Connection::connect_uri(
amqp_uri,
ConnectionProperties::default().with_default_executor(8),
)
.wait()
.unwrap();
info!("Connected");
let channel = Arc::new(channels::declare_consumer_channel(
&conn,
&worker_configuration,
));
let consumer = channel
.clone()
.basic_consume(
&amqp_queue,
"amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_consumer = channel
.clone()
.basic_consume(
&worker_configuration.get_direct_messaging_queue_name(),
"status_amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_response_channel = channel.clone();
let status_worker_configuration = worker_configuration.clone();
let _consumer = spawner.spawn_local(async move {
status_consumer
.for_each(move |delivery| {
          let (_channel, delivery) = delivery.expect("error caught in consumer");
worker::system_information::send_real_time_information(
delivery,
&status_response_channel,
&status_worker_configuration,
)
.map(|_| ())
})
.await
});
info!("Start to consume on queue {:?}", amqp_queue);
let clone_channel = channel.clone();
let message_event = message_event_ref.clone();
consumer
.for_each(move |delivery| {
        let (_channel, delivery) = delivery.expect("error caught in consumer");
message::process_message(message_event.clone(), delivery, clone_channel.clone())
.map(|_| ())
})
.await
});
let sleep_duration = time::Duration::new(1, 0);
thread::sleep(sleep_duration);
info!("Reconnection...");
}
}
#[test]
fn empty_message_event_impl() {
#[derive(Debug)]
struct CustomEvent {}
#[derive(JsonSchema, Deserialize)]
struct CustomParameters {}
impl MessageEvent<CustomParameters> for CustomEvent {
fn get_name(&self) -> String {
"custom".to_string()
}
fn get_short_description(&self) -> String {
"short description".to_string()
}
fn get_description(&self) -> String {
"long description".to_string()
}
fn get_version(&self) -> semver::Version {
semver::Version::new(1, 2, 3)
}
}
let custom_event = CustomEvent {};
let parameters = CustomParameters {};
let job = job::Job {
job_id: 1234,
parameters: vec![],
};
let job_result = job::JobResult::new(job.job_id);
let result = custom_event.process(None, parameters, job_result);
assert!(result == Err(MessageError::NotImplemented()));
}
lib.rs | //! # MCAI Worker SDK
//!
//! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html).
//! It's used for every worker as an abstraction.
//! It manages requirements, message parsing and direct messaging by itself.
//!
//! ## Worker implementation
//!
//! 1. Create a Rust project
//! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"`
//! 3. Update the main file with the example provided here to implement the [MessageEvent](trait.MessageEvent.html) trait,
//! and call the [`start_worker`](fn.start_worker.html) to start the worker itself.
//!
//! ```rust
//! use mcai_worker_sdk::{
//! MessageEvent,
//! Version,
//! worker::Parameter,
//! };
//! use serde_derive::Deserialize;
//! use schemars::JsonSchema;
//!
//! #[derive(Debug)]
//! struct WorkerNameEvent {}
//!
//! #[derive(Debug, Deserialize, JsonSchema)]
//! struct WorkerParameters {}
//!
//! impl MessageEvent<WorkerParameters> for WorkerNameEvent {
//! fn get_name(&self) -> String {"sample_worker".to_string()}
//! fn get_short_description(&self) -> String {"Short description".to_string()}
//! fn get_description(&self) -> String {"Long description".to_string()}
//! fn get_version(&self) -> Version { Version::new(0, 0, 1) }
//! }
//! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {};
//!
//! // uncomment it to start the worker
//! // fn main() {
//! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT);
//! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process messages for different purposes (functional tests, message order examples, etc.).
//!
//! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders.
//! It can take multiple orders, joined with `:` on Unix platforms and `;` on Windows.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
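//!
//! On Windows, the same run joins the order paths with `;` instead (an
//! illustrative form; adapt the shell syntax to your environment):
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS="./examples/success_order.json;./examples/error_order.json" cargo run --example worker
//! ```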
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
        // improvement: also track a PTS for these variants so processing can terminate correctly
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
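// Illustrative sketch (all names below are invented for the example): a minimal
// worker only overrides `process` and relies on the default no-op `init`. Kept
// as a test-only module so it does not affect the public API.
#[cfg(test)]
mod echo_worker_sketch {
    use super::*;
    #[derive(Debug)]
    struct EchoEvent;
    #[derive(Debug, Deserialize, JsonSchema)]
    struct EchoParameters {}
    impl MessageEvent<EchoParameters> for EchoEvent {
        fn get_name(&self) -> String {
            "echo".to_string()
        }
        fn get_short_description(&self) -> String {
            "echo worker".to_string()
        }
        fn get_description(&self) -> String {
            "returns the job result unchanged".to_string()
        }
        fn get_version(&self) -> semver::Version {
            semver::Version::new(0, 1, 0)
        }
        fn process(
            &self,
            _channel: Option<McaiChannel>,
            _parameters: EchoParameters,
            job_result: JobResult,
        ) -> Result<JobResult> {
            // No work to do: hand the result back to the SDK as-is.
            Ok(job_result)
        }
    }
    #[test]
    fn echo_process_returns_result() {
        let event = EchoEvent;
        let job_result = JobResult::new(123);
        assert!(event.process(None, EchoParameters {}, job_result).is_ok());
    }
}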
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
}
Err(message) => {
error!("{:?}", message);
}
}
}
return;
}
loop {
let amqp_uri = get_amqp_uri();
let mut executor = LocalPool::new();
let spawner = executor.spawner();
executor.run_until(async {
let conn = Connection::connect_uri(
amqp_uri,
ConnectionProperties::default().with_default_executor(8),
)
.wait()
.unwrap();
info!("Connected");
let channel = Arc::new(channels::declare_consumer_channel(
&conn,
&worker_configuration,
));
let consumer = channel
.clone()
.basic_consume(
&amqp_queue,
"amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_consumer = channel
.clone()
.basic_consume(
&worker_configuration.get_direct_messaging_queue_name(),
"status_amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_response_channel = channel.clone();
let status_worker_configuration = worker_configuration.clone();
let _consumer = spawner.spawn_local(async move {
status_consumer
.for_each(move |delivery| {
          let (_channel, delivery) = delivery.expect("error caught in consumer");
worker::system_information::send_real_time_information(
delivery,
&status_response_channel,
&status_worker_configuration,
)
.map(|_| ())
})
.await
});
info!("Start to consume on queue {:?}", amqp_queue);
let clone_channel = channel.clone();
let message_event = message_event_ref.clone();
consumer
.for_each(move |delivery| {
        let (_channel, delivery) = delivery.expect("error caught in consumer");
message::process_message(message_event.clone(), delivery, clone_channel.clone())
.map(|_| ())
})
.await
});
let sleep_duration = time::Duration::new(1, 0);
thread::sleep(sleep_duration);
info!("Reconnection...");
}
}
#[test]
fn empty_message_event_impl() {
#[derive(Debug)]
struct CustomEvent {}
#[derive(JsonSchema, Deserialize)]
struct CustomParameters {}
impl MessageEvent<CustomParameters> for CustomEvent {
fn get_name(&self) -> String {
"custom".to_string()
}
fn get_short_description(&self) -> String {
"short description".to_string()
}
    fn get_description(&self) -> String {
        "long description".to_string()
    }
fn get_version(&self) -> semver::Version {
semver::Version::new(1, 2, 3)
}
}
let custom_event = CustomEvent {};
let parameters = CustomParameters {};
let job = job::Job {
job_id: 1234,
parameters: vec![],
};
let job_result = job::JobResult::new(job.job_id);
let result = custom_event.process(None, parameters, job_result);
assert!(result == Err(MessageError::NotImplemented()));
}
main.rs | /**
shorturl is a web server that can host shortened URLs.
## Example usage
Creating a link:
```
$ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon"
/tsauvajon now redirects to https://linkedin.com/in/tsauvajon
```
Using it redirects us:
```
$ curl 127.0.0.1:8080/tsauvajon -v
* Trying 127.0.0.1...
* TCP_NODELAY set
* Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0)
> GET /tsauvajon HTTP/1.1
> Host: 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
```
*/
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // randomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
#[get("/{id}")]
async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
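/// Derives a deterministic short id for a target URL: the first
/// RANDOM_URL_SIZE hex characters of the URL's BLAKE3 hash.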
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
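/// Reads the request body into a UTF-8 string, rejecting payloads larger
/// than MAX_SIZE bytes so a client cannot buffer unbounded data in memory.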
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
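/// Registers `target` under `id`, or under a hash-derived id when none is
/// given; fails on malformed URLs and on id collisions.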
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> {
    if let Err(err) = Url::parse(&target) {
        return Err(format!("malformed URL: {}", err));
    };
    let id = match id {
        Some(id) => id,
        None => hash(&target),
    };
    let mut db = db.write().unwrap();
    if db.contains_key(&id) {
        Err("already registered".to_string())
    } else {
        db.insert(id.clone(), target.clone());
        Ok(format!("/{} now redirects to {}", id, target))
    }
}
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvajon..."),
body
);
assert_eq!(
resp.headers().get("Location"),
Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap())
)
}
// try to follow a shortened URL that doesn't exist
#[actix_rt::test]
async fn integration_test_link_miss() {
let req = test::TestRequest::get()
.uri("/thislinkdoesntexist")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db).service(browse)).await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("not found"), body);
assert_eq!(resp.headers().get("Location"), None)
}
// try to add a link for an already existing short-url
#[actix_rt::test]
async fn integration_test_collision() {
let req = test::TestRequest::post()
.uri("/alreadyexists")
.set_payload("https://something.new")
.to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(
"alreadyexists".into(),
"https://github.com/tsauvajon".into(),
);
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(create_with_id),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("already registered"), body);
}
}
main.rs | /**
shorturl is a web server that can host shortened URLs.
## Example usage
Creating a link:
```
$ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon"
/tsauvajon now redirects to https://linkedin.com/in/tsauvajon
```
Using it redirects us:
```
$ curl 127.0.0.1:8080/tsauvajon -v
* Trying 127.0.0.1...
* TCP_NODELAY set
* Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0)
> GET /tsauvajon HTTP/1.1
> Host: 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
```
*/
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // randomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
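// The whole "database" is an in-memory map behind a read-write lock; actix-web
// clones the shared web::Data handle into every worker, so all requests
// observe the same map.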
#[get("/{id}")]
async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> {
if let Err(err) = Url::parse(&target) {
return Err(format!("malformed URL: {}", err));
};
let id = match id {
Some(id) => id,
None => hash(&target),
};
let mut db = db.write().unwrap();
if db.contains_key(&id) {
Err("already registered".to_string())
} else {
db.insert(id.clone(), target.clone());
Ok(format!("/{} now redirects to {}", id, target))
}
}
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvajon..."),
body
);
assert_eq!(
resp.headers().get("Location"),
Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap())
)
}
// try to follow a shortened URL that doesn't exist
#[actix_rt::test]
async fn integration_test_link_miss() {
let req = test::TestRequest::get()
.uri("/thislinkdoesntexist")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db).service(browse)).await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("not found"), body);
assert_eq!(resp.headers().get("Location"), None)
}
// try to add a link for an already existing short-url
#[actix_rt::test]
async fn integration_test_collision() {
let req = test::TestRequest::post()
.uri("/alreadyexists")
.set_payload("https://something.new")
.to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(
"alreadyexists".into(),
"https://github.com/tsauvajon".into(),
);
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(create_with_id),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("already registered"), body);
}
}
main.rs | /**
shorturl is a web server that can host shortened URLs.
## Example usage
Creating a link:
```
$ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon"
/tsauvajon now redirects to https://linkedin.com/in/tsauvajon
```
Using it redirects us:
```
$ curl 127.0.0.1:8080/tsauvajon -v
* Trying 127.0.0.1...
* TCP_NODELAY set
* Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0)
> GET /tsauvajon HTTP/1.1
> Host: 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
```
*/
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // randomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
#[get("/{id}")]
async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> {
if let Err(err) = Url::parse(&target) {
return Err(format!("malformed URL: {}", err));
};
let id = match id {
Some(id) => id,
None => hash(&target),
};
let mut db = db.write().unwrap();
if db.contains_key(&id) {
Err("already registered".to_string())
} else {
db.insert(id.clone(), target.clone());
Ok(format!("/{} now redirects to {}", id, target))
}
}
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
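// Possible hardening sketch (not in the original code): make the bind address
// configurable via an environment variable instead of hard-coding it, e.g.:
//
//     let addr = std::env::var("BIND_ADDR").unwrap_or_else(|_| "127.0.0.1:8080".into());
//     HttpServer::new(app_factory).bind(addr)?.run().await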
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvajon..."),
body
);
assert_eq!(
resp.headers().get("Location"),
Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap())
)
}
// try to follow a shortened URL that doesn't exist
#[actix_rt::test]
async fn integration_test_link_miss() {
let req = test::TestRequest::get()
.uri("/thislinkdoesntexist")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db).service(browse)).await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("not found"), body);
assert_eq!(resp.headers().get("Location"), None)
}
// try to add a link for an already existing short-url
#[actix_rt::test]
async fn integration_test_collision() {
let req = test::TestRequest::post()
.uri("/alreadyexists")
.set_payload("https://something.new")
.to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(
"alreadyexists".into(),
"https://github.com/tsauvajon".into(),
);
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(create_with_id),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(&Body::from("already registered"), body);
}
}
| browse | identifier_name |
mod.rs | passphrase = prompt_password(prompt)?;
let confirmed = prompt_password(confirm)?;
// If they match, continue the process
if passphrase.reveal() == confirmed.reveal() {
break;
}
// If they don't match, keep prompting until we hit the sanity limit
tries += 1;
if tries == PASSPHRASE_SANITY_LIMIT {
return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!"));
}
println!("Passphrases don't match! Try again.");
}
// Score the passphrase and provide feedback
let weak = display_password_feedback(&passphrase);
// If the passphrase is weak, see if the user wishes to change it
if weak {
println!("Would you like to choose a different passphrase?");
println!(" y/Y: Yes, choose a different passphrase");
println!(" n/N: No, use this passphrase");
println!(" Enter anything else if you changed your mind and want to cancel");
let mut input = "".to_string();
        // Best effort: on read failure the input stays empty and falls through to the cancel arm.
        let _ = std::io::stdin().read_line(&mut input);
match input.trim().to_lowercase().as_str() {
// Choose a different passphrase
"y" => {
continue;
},
// Use this passphrase
"n" => {
return Ok(passphrase);
},
// By default, we cancel to be safe
_ => {
return Err(ExitError::new(
ExitCode::InputError,
"Canceling with unchanged passphrase!",
));
},
}
} else {
// The passphrase is fine, so return it
return Ok(passphrase);
}
}
}
/// Get feedback, if available, for a weak passphrase
fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> {
std::str::from_utf8(passphrase.reveal())
.ok()
.and_then(|passphrase| zxcvbn(passphrase, &[]).ok())
.and_then(|scored| scored.feedback().to_owned())
.map(|feedback| feedback.suggestions().to_owned())
.map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect())
}
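// Illustrative note (not part of the wallet flow): zxcvbn also exposes a 0-4
// score, so an alternative policy could reject weak passphrases outright:
//
//     let scored = zxcvbn(passphrase_str, &[]).ok()?;
//     let too_weak = scored.score() < 3;
//
// Here we only surface the textual suggestions and let the user decide.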
/// Display passphrase feedback to the user
///
/// Returns `true` if and only if the passphrase is weak
fn display_password_feedback(passphrase: &SafePassword) -> bool {
if passphrase.reveal().is_empty() {
// The passphrase is empty, which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
},
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config, if they exist
/// 5. The peer seeds defined in config
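/// For instance (illustrative): if the config file sets `custom_base_node` and a
/// local base node is also detected, the config entry wins and no prompt is shown.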
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try detect one
    if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
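// Hedged sketch of the dispatch above as a standalone truth table over
// (non_interactive, has_input_file, has_command); the function name and
// labels are illustrative only.
fn mode_label_sketch(non_interactive: bool, has_input_file: bool, has_command: bool) -> &'static str {
    match (non_interactive, has_input_file, has_command) {
        (false, false, false) => "tui",
        (true, false, false) => "grpc",
        (_, true, false) => "script",
        (_, false, true) => "command",
        _ => "invalid",
    }
}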
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
}; | let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
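// The fs::create_dir_all + map_err pattern at the top of init_wallet could be
// factored as below; a sketch only, reusing the crate's ExitError/ExitCode.
fn ensure_dir_sketch(path: &std::path::Path, what: &str) -> Result<(), ExitError> {
    std::fs::create_dir_all(path).map_err(|e| {
        ExitError::new(
            ExitCode::WalletError,
            format!("Error creating {} folder. {}", what, e),
        )
    })
}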
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
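// Minimal, self-contained sketch of the same "is anything listening locally?"
// probe using a plain TCP connect instead of the gRPC identify call; the
// timeout value is an illustrative assumption.
fn local_port_open_sketch(port: u16) -> bool {
    use std::net::{SocketAddr, TcpStream};
    use std::time::Duration;
    let addr: SocketAddr = ([127, 0, 0, 1], port).into();
    TcpStream::connect_timeout(&addr, Duration::from_millis(200)).is_ok()
}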
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
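// The signature handling above is the Option::filter pattern: keep a cached
// value only while it still validates. A generic, hypothetical sketch:
fn keep_if_valid_sketch<T>(cached: Option<T>, is_valid: impl Fn(&T) -> bool) -> Option<T> {
    cached.filter(|v| is_valid(v))
}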
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) {
// NOTE: https://github.com/tari-project/tari/issues/5227
debug!("revalidating all transactions");
if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await {
error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e);
}
debug!("restarting transaction protocols");
if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await {
error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e);
}
debug!("validating transactions");
if let Err(e) = wallet.transaction_service.validate_transactions().await {
error!(
target: LOG_TARGET,
"Problem validating and restarting transaction protocols: {}", e
);
}
// validate transaction outputs
validate_txos(wallet).await?;
}
Ok(())
}
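// The three "log and continue" restarts above share one shape; a hedged
// generic sketch (names are illustrative, not part of the codebase):
async fn run_tolerant_sketch<F, E>(label: &str, fut: F)
where
    F: std::future::Future<Output = Result<(), E>>,
    E: std::fmt::Display,
{
    if let Err(e) = fut.await {
        eprintln!("{} failed: {}", label, e);
    }
}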
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Starting TXO validations.");
wallet.output_manager_service.validate_txos().await.map_err(|e| {
error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e);
ExitError::new(ExitCode::WalletError, e)
})?;
debug!(target: LOG_TARGET, "TXO validations started.");
Ok(())
}
pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?;
println!();
println!("=========================");
println!(" IMPORTANT! ");
println!("=========================");
println!("These are your wallet seed words.");
println!("They can be used to recover your wallet and funds.");
println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO.");
println!();
println!("=========================");
println!("{}", seed_words.join(" ").reveal());
println!("=========================");
println!("\x07"); // beep!
let mut rl = Editor::<()>::new();
loop {
println!("I confirm that I will never see these seed words again.");
println!(r#"Type the word "confirm" to continue."#);
let readline = rl.readline(">> ");
match readline {
| random_line_split |
|
mod.rs | _LIMIT {
return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!"));
}
println!("Passphrases don't match! Try again.");
}
// Score the passphrase and provide feedback
let weak = display_password_feedback(&passphrase);
// If the passphrase is weak, see if the user wishes to change it
if weak {
println!("Would you like to choose a different passphrase?");
println!(" y/Y: Yes, choose a different passphrase");
println!(" n/N: No, use this passphrase");
println!(" Enter anything else if you changed your mind and want to cancel");
let mut input = "".to_string();
let _ = std::io::stdin().read_line(&mut input); // ignore errors; empty input falls through to the cancel arm
match input.trim().to_lowercase().as_str() {
// Choose a different passphrase
"y" => {
continue;
},
// Use this passphrase
"n" => {
return Ok(passphrase);
},
// By default, we cancel to be safe
_ => {
return Err(ExitError::new(
ExitCode::InputError,
"Canceling with unchanged passphrase!",
));
},
}
} else {
// The passphrase is fine, so return it
return Ok(passphrase);
}
}
}
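// The y/n/cancel prompt inside the loop above, factored into a helper;
// hypothetical, not part of the codebase. None means "cancel".
fn confirm_weak_sketch() -> Option<bool> {
    let mut input = String::new();
    if std::io::stdin().read_line(&mut input).is_err() {
        return None; // treat a failed read as cancel, the safe default
    }
    match input.trim().to_lowercase().as_str() {
        "y" => Some(true),  // choose a different passphrase
        "n" => Some(false), // keep this one
        _ => None,          // anything else cancels
    }
}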
/// Get feedback, if available, for a weak passphrase
fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> {
std::str::from_utf8(passphrase.reveal())
.ok()
.and_then(|passphrase| zxcvbn(passphrase, &[]).ok())
.and_then(|scored| scored.feedback().to_owned())
.map(|feedback| feedback.suggestions().to_owned())
.map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect())
}
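// Hedged sketch of scoring a candidate with the zxcvbn crate used above;
// that `score()` reports 0-4 is an assumption about the crate version.
fn print_score_sketch(pw: &str) {
    if let Ok(entropy) = zxcvbn::zxcvbn(pw, &[]) {
        println!("passphrase score: {}/4", entropy.score());
    }
}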
/// Display passphrase feedback to the user
///
/// Returns `true` if and only if the passphrase is weak
fn display_password_feedback(passphrase: &SafePassword) -> bool {
if passphrase.reveal().is_empty() {
// The passphrase is empty, which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
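// Self-contained sketch of the same precedence (argument, then the
// TARI_WALLET_PASSWORD environment variable, then config), with the final
// prompt step left out; plain Strings stand in for SafePassword.
fn resolve_password_sketch(arg: Option<String>, config: Option<String>) -> Option<String> {
    arg.or_else(|| std::env::var("TARI_WALLET_PASSWORD").ok())
        .or(config)
}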
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
},
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config if they exist
/// 5. The peer seeds defined in config
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try to detect one
if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) {
// NOTE: https://github.com/tari-project/tari/issues/5227
debug!("revalidating all transactions");
if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await {
error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e);
}
debug!("restarting transaction protocols");
if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await {
error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e);
}
debug!("validating transactions");
if let Err(e) = wallet.transaction_service.validate_transactions().await {
error!(
target: LOG_TARGET,
"Problem validating and restarting transaction protocols: {}", e
);
}
// validate transaction outputs
validate_txos(wallet).await?;
}
Ok(())
}
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Starting TXO validations.");
wallet.output_manager_service.validate_txos().await.map_err(|e| {
error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e);
ExitError::new(ExitCode::WalletError, e)
})?;
debug!(target: LOG_TARGET, "TXO validations started.");
Ok(())
}
pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?;
println!();
println!("=========================");
println!(" IMPORTANT! ");
println!("=========================");
println!("These are your wallet seed words.");
println!("They can be used to recover your wallet and funds.");
println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO.");
println!();
println!("=========================");
println!("{}", seed_words.join(" ").reveal());
println!("=========================");
println!("\x07"); // beep!
let mut rl = Editor::<()>::new();
loop {
println!("I confirm that I will never see these seed words again.");
println!(r#"Type the word "confirm" to continue."#);
let readline = rl.readline(">> ");
match readline {
Ok(line) => match line.to_lowercase().as_ref() {
"confirm" => return Ok(()),
_ => continue,
},
Err(e) => {
return Err(ExitError::new(ExitCode::IOError, e));
},
}
}
}
/// Clear the terminal and print the Tari splash
pub fn | tari_splash_screen | identifier_name |
|
mod.rs | passphrase = prompt_password(prompt)?;
let confirmed = prompt_password(confirm)?;
// If they match, continue the process
if passphrase.reveal() == confirmed.reveal() {
break;
}
// If they don't match, keep prompting until we hit the sanity limit
tries += 1;
if tries == PASSPHRASE_SANITY_LIMIT {
return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!"));
}
println!("Passphrases don't match! Try again.");
}
// Score the passphrase and provide feedback
let weak = display_password_feedback(&passphrase);
// If the passphrase is weak, see if the user wishes to change it
if weak {
println!("Would you like to choose a different passphrase?");
println!(" y/Y: Yes, choose a different passphrase");
println!(" n/N: No, use this passphrase");
println!(" Enter anything else if you changed your mind and want to cancel");
let mut input = "".to_string();
let _ = std::io::stdin().read_line(&mut input); // ignore errors; empty input falls through to the cancel arm
match input.trim().to_lowercase().as_str() {
// Choose a different passphrase
"y" => {
continue;
},
// Use this passphrase
"n" => {
return Ok(passphrase);
},
// By default, we cancel to be safe
_ => {
return Err(ExitError::new(
ExitCode::InputError,
"Canceling with unchanged passphrase!",
));
},
}
} else {
// The passphrase is fine, so return it
return Ok(passphrase);
}
}
}
/// Get feedback, if available, for a weak passphrase
fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> {
std::str::from_utf8(passphrase.reveal())
.ok()
.and_then(|passphrase| zxcvbn(passphrase, &[]).ok())
.and_then(|scored| scored.feedback().to_owned())
.map(|feedback| feedback.suggestions().to_owned())
.map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect())
}
/// Display passphrase feedback to the user
///
/// Returns `true` if and only if the passphrase is weak
fn display_password_feedback(passphrase: &SafePassword) -> bool {
if passphrase.reveal().is_empty() {
// The passphrase is empty, which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
},
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config if they exist
/// 5. The peer seeds defined in config
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try to detect one
if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> | // NOTE: https://github.com/tari-project/tari/issues/5227
debug!("revalidating all transactions");
if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await {
error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e);
}
debug!("restarting transaction protocols");
if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await {
error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e);
}
debug!("validating transactions");
if let Err(e) = wallet.transaction_service.validate_transactions().await {
error!(
target: LOG_TARGET,
"Problem validating and restarting transaction protocols: {}", e
);
}
// validate transaction outputs
validate_txos(wallet).await?;
}
Ok(())
}
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Starting TXO validations.");
wallet.output_manager_service.validate_txos().await.map_err(|e| {
error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e);
ExitError::new(ExitCode::WalletError, e)
})?;
debug!(target: LOG_TARGET, "TXO validations started.");
Ok(())
}
pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?;
println!();
println!("=========================");
println!(" IMPORTANT! ");
println!("=========================");
println!("These are your wallet seed words.");
println!("They can be used to recover your wallet and funds.");
println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO.");
println!();
println!("=========================");
println!("{}", seed_words.join(" ").reveal());
println!("=========================");
println!("\x07"); // beep!
let mut rl = Editor::<()>::new();
loop {
println!("I confirm that I will never see these seed words again.");
println!(r#"Type the word "confirm" to continue."#);
let readline = rl.readline(">> ");
match readline {
| {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { | identifier_body |
mod.rs | passphrase = prompt_password(prompt)?;
let confirmed = prompt_password(confirm)?;
// If they match, continue the process
if passphrase.reveal() == confirmed.reveal() {
break;
}
// If they don't match, keep prompting until we hit the sanity limit
tries += 1;
if tries == PASSPHRASE_SANITY_LIMIT {
return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!"));
}
println!("Passphrases don't match! Try again.");
}
// Score the passphrase and provide feedback
let weak = display_password_feedback(&passphrase);
// If the passphrase is weak, see if the user wishes to change it
if weak {
println!("Would you like to choose a different passphrase?");
println!(" y/Y: Yes, choose a different passphrase");
println!(" n/N: No, use this passphrase");
println!(" Enter anything else if you changed your mind and want to cancel");
let mut input = "".to_string();
let _ = std::io::stdin().read_line(&mut input); // ignore errors; empty input falls through to the cancel arm
match input.trim().to_lowercase().as_str() {
// Choose a different passphrase
"y" => {
continue;
},
// Use this passphrase
"n" => {
return Ok(passphrase);
},
// By default, we cancel to be safe
_ => {
return Err(ExitError::new(
ExitCode::InputError,
"Canceling with unchanged passphrase!",
));
},
}
} else {
// The passphrase is fine, so return it
return Ok(passphrase);
}
}
}
/// Get feedback, if available, for a weak passphrase
fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> {
std::str::from_utf8(passphrase.reveal())
.ok()
.and_then(|passphrase| zxcvbn(passphrase, &[]).ok())
.and_then(|scored| scored.feedback().to_owned())
.map(|feedback| feedback.suggestions().to_owned())
.map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect())
}
/// Display passphrase feedback to the user
///
/// Returns `true` if and only if the passphrase is weak
fn display_password_feedback(passphrase: &SafePassword) -> bool {
if passphrase.reveal().is_empty() {
// The passphrase is empty, which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => | ,
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config if they exist
/// 5. The peer seeds defined in config
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try to detect one
if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) {
// NOTE: https://github.com/tari-project/tari/issues/5227
debug!("revalidating all transactions");
if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await {
error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e);
}
debug!("restarting transaction protocols");
if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await {
error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e);
}
debug!("validating transactions");
if let Err(e) = wallet.transaction_service.validate_transactions().await {
error!(
target: LOG_TARGET,
"Problem validating and restarting transaction protocols: {}", e
);
}
// validate transaction outputs
validate_txos(wallet).await?;
}
Ok(())
}
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Starting TXO validations.");
wallet.output_manager_service.validate_txos().await.map_err(|e| {
error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e);
ExitError::new(ExitCode::WalletError, e)
})?;
debug!(target: LOG_TARGET, "TXO validations started.");
Ok(())
}
pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?;
println!();
println!("=========================");
println!(" IMPORTANT! ");
println!("=========================");
println!("These are your wallet seed words.");
println!("They can be used to recover your wallet and funds.");
println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO.");
println!();
println!("=========================");
println!("{}", seed_words.join(" ").reveal());
println!("=========================");
println!("\x07"); // beep!
let mut rl = Editor::<()>::new();
loop {
println!("I confirm that I will never see these seed words again.");
println!(r#"Type the word "confirm" to continue."#);
let readline = rl.readline(">> ");
match readline {
| {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
} | conditional_block |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
        self.max_pattern_id = 0;
        // also clear the byte count so `memory_usage` is accurate after reset
        self.total_pattern_bytes = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
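// A small usage sketch added for illustration (not part of the original
// source); it assumes only the `Patterns` API defined above and shows how
// leftmost-longest semantics reorder iteration by descending pattern length.
#[cfg(test)]
mod ordering_sketch {
    use super::*;

    #[test]
    fn leftmost_longest_yields_longer_patterns_first() {
        let mut patterns = Patterns::new();
        patterns.add(b"foo");
        patterns.add(b"foobar");
        patterns.set_match_kind(MatchKind::LeftmostLongest);
        // After the sort in `set_match_kind`, iteration starts with the
        // longest pattern; ties keep their insertion order.
        let (_id, first) = patterns.iter().next().unwrap();
        assert_eq!(first.bytes(), &b"foobar"[..]);
    }
}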
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool | }
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
        // Thus, dereferencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
        // end of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
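        //
        // Worked example (an added editorial note): for x.len() == 6, the
        // loop compares bytes [0, 4), and the trailing loads at `pxend` and
        // `pyend` compare bytes [2, 6). Bytes 2 and 3 are checked twice,
        // but the two windows overlap rather than leaving any byte out.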
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
                if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
}
| {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
        // codegen. By rolling our own memcmp in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
if b1 != b2 {
return false; | identifier_body |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
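    // Editorial note: the figure above is approximate by design. It counts
    // the `order` ids, the `Vec` headers in `by_id`, and the raw pattern
    // bytes, but not each inner `Vec`'s spare capacity or the heap buffers
    // of `by_id` and `order` themselves.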
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
        self.max_pattern_id = 0;
        // also clear the byte count so `memory_usage` is accurate after reset
        self.total_pattern_bytes = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> { | self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
        // codegen. By rolling our own memcmp in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
        if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
                if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
        // Thus, dereferencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
        // end of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
                if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
} | if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id); | random_line_split |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
        self.max_pattern_id = 0;
        // also clear the byte count so `memory_usage` is accurate after reset
        self.total_pattern_bytes = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn | (&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
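// A second illustrative sketch (editorial, not from the original source):
// it exercises `get`, `is_prefix`, and `equals` through the API defined in
// this file, assuming nothing beyond those methods.
#[cfg(test)]
mod pattern_sketch {
    use super::*;

    #[test]
    fn prefix_and_equality() {
        let mut patterns = Patterns::new();
        patterns.add(b"abc");
        let pat = patterns.get(0);
        // "abc" is a prefix of "abcdef", equals only its exact bytes.
        assert!(pat.is_prefix(b"abcdef"));
        assert!(pat.equals(b"abc"));
        assert!(!pat.equals(b"abd"));
    }
}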
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
        // codegen. By rolling our own memcmp in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
        if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
                if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
        // Thus, dereferencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
        // end of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
                if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
}
| minimum_len | identifier_name |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => |
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
        self.max_pattern_id = 0;
        // also clear the byte count so `memory_usage` is accurate after reset
        self.total_pattern_bytes = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
        // codegen. By rolling our own memcmp in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
        if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
                if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
        // Thus, dereferencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
        // end of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
                if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
}
| {
self.order.sort();
} | conditional_block |
segment_accountant.rs | Some(new_lsn);
self.state = Active;
}
/// Transitions a segment to being in the Inactive state.
/// Called in:
///
/// PageCache::advance_snapshot for marking when a
/// segment has been completely read
///
    /// SegmentAccountant::recover for when a non-empty segment is
    /// rediscovered on disk during startup
pub fn active_to_inactive(&mut self, lsn: Lsn, from_recovery: bool) {
trace!("setting Segment with lsn {:?} to Inactive", self.lsn());
assert_eq!(self.state, Active);
if from_recovery {
assert!(lsn >= self.lsn());
} else {
assert_eq!(self.lsn.unwrap(), lsn);
}
self.state = Inactive;
// now we can push any deferred removals to the removed set
let deferred = mem::replace(&mut self.deferred_remove, HashSet::new());
for pid in deferred {
self.remove_pid(pid, lsn);
}
}
pub fn inactive_to_draining(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Draining", self.lsn());
assert_eq!(self.state, Inactive);
assert!(lsn >= self.lsn());
self.state = Draining;
}
pub fn draining_to_free(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Free", self.lsn());
assert!(self.is_draining());
assert!(lsn >= self.lsn());
self.present.clear();
self.removed.clear();
self.state = Free;
}
pub fn recovery_ensure_initialized(&mut self, lsn: Lsn) {
if let Some(current_lsn) = self.lsn {
            if current_lsn != lsn {
assert!(lsn > current_lsn);
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.state = Free;
self.free_to_active(lsn);
}
} else {
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.free_to_active(lsn);
}
}
fn lsn(&self) -> Lsn {
self.lsn.unwrap()
}
/// Add a pid to the Segment. The caller must provide
/// the Segment's LSN.
pub fn insert_pid(&mut self, pid: PageID, lsn: Lsn) {
assert_eq!(lsn, self.lsn.unwrap());
// if this breaks, we didn't implement the transition
// logic right in write_to_log, and maybe a thread is
// using the SA to add pids AFTER their calls to
// res.complete() worked.
assert_eq!(self.state, Active);
assert!(!self.removed.contains(&pid));
self.present.insert(pid);
}
/// Mark that a pid in this Segment has been relocated.
/// The caller must provide the LSN of the removal.
pub fn remove_pid(&mut self, pid: PageID, lsn: Lsn) {
// TODO this could be racy?
assert!(lsn >= self.lsn.unwrap());
match self.state {
Active => {
// we have received a removal before
// transferring this segment to Inactive, so
// we defer this pid's removal until the transfer.
self.deferred_remove.insert(pid);
}
Inactive | Draining => {
self.present.remove(&pid);
self.removed.insert(pid);
}
Free => panic!("remove_pid called on a Free Segment"),
}
}
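    // Illustrative timeline for the deferral above (editorial note): if a
    // pid is rewritten while its old segment is still Active, the removal
    // is parked in `deferred_remove`; `active_to_inactive` later replays it
    // as a real removal, so `present` and `removed` only change once the
    // segment has stopped accepting writes.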
fn live_pct(&self) -> f64 {
let total = self.present.len() + self.removed.len();
self.present.len() as f64 / total as f64
}
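    // For example (editorial note): 25 pids present and 75 removed gives a
    // live_pct of 0.25; with a segment_cleanup_threshold of 0.5 that puts
    // this segment at or below the cutoff used to queue it for cleaning.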
fn can_free(&self) -> bool {
self.state == Draining && self.is_empty()
}
pub fn is_empty(&self) -> bool {
self.present.is_empty()
}
}
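// Lifecycle summary (an editorial sketch inferred from the transitions
// above, not part of the original source):
//
//   Free -> Active -> Inactive -> Draining -> Free
//
// Pids may only be inserted while a segment is Active; removals observed
// while Active are deferred until the Inactive transition, and a Draining
// segment returns to Free only once it holds no live pids.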
impl SegmentAccountant {
pub fn new(config: Config) -> SegmentAccountant {
let mut ret = SegmentAccountant::default();
ret.config = config;
ret.scan_segment_lsns();
ret
}
/// Called from the `PageCache` recovery logic, this initializes the
/// `SegmentAccountant` based on recovered segment information.
pub fn initialize_from_segments(&mut self, mut segments: Vec<Segment>) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<LogID> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(_lsn, lid)| *lid)
.collect();
for (idx, ref mut segment) in segments.iter_mut().enumerate() {
if segment.lsn.is_none() {
continue;
}
let segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
let lsn = segment.lsn();
            // populate free and to_clean based on what the segment has seen:
            // empty segments can be freed, mostly-dead ones can be cleaned
if segment.is_empty() {
// can be reused immediately
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
if segment.state == Inactive {
segment.inactive_to_draining(lsn);
}
self.to_clean.remove(&segment_start);
trace!("pid {} freed @initialize_from_segments", segment_start);
if logical_tail.contains(&segment_start) {
// we depend on the invariant that the last segments
// always link together, so that we can detect torn
// segments during recovery.
self.ensure_safe_free_distance();
}
segment.draining_to_free(lsn);
                if self.tip != segment_start &&
!self.free.lock().unwrap().contains(&segment_start)
{
// don't give out this segment twice
trace!(
"freeing segment {} from initialize_from_segments, tip: {}",
segment_start,
self.tip
);
self.free_segment(segment_start, true);
}
} else if segment.live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"setting segment {} to Draining from initialize_from_segments",
segment_start
);
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
segment.inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
                self.free.lock().unwrap().retain(|&s| s != segment_start);
} else {
                self.free.lock().unwrap().retain(|&s| s != segment_start);
}
}
self.set_last_given();
        if !segments.is_empty() {
trace!("initialized self.segments to {:?}", segments);
for (i, segment) in segments.into_iter().enumerate() {
// we should not forget about segments that we've added
// during the initial segment scan, but freed for being
// empty, as that's where we set an LSN for them.
self.segments[i] = segment;
}
} else {
// this is basically just for when we recover with a single
// empty-yet-initialized segment
debug!(
"pagecache recovered no segments so not initializing from any"
);
}
}
fn set_last_given(&mut self) {
let new_max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((_lsn, lid)) = new_max {
trace!("setting last_given to {}", lid);
self.last_given = lid;
}
}
// Mark a specific segment as being present at a particular
// file offset.
fn recover(&mut self, lsn: Lsn, lid: LogID) {
trace!("recovered segment lsn {} at lid {}", lsn, lid);
let io_buf_size = self.config.get_io_buf_size() as LogID;
let idx = self.lid_to_idx(lid);
        assert!(!(lsn == 0 && lid != 0), "lsn 0 provided with non-zero lid");
        if !self.segments[idx].is_empty() {
self.segments[idx].free_to_active(lsn);
let segment_lsn = lsn / io_buf_size * io_buf_size;
self.segments[idx].active_to_inactive(segment_lsn, true);
} else {
// this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
            if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
            if !empty_tip {
// if we found any later
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
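    // Editorial note on the recovery sequence above: segment headers are
    // scanned first to rebuild the lsn -> lid ordering, `clean_tail_tears`
    // then checks that the last few segments chain via their `prev`
    // pointers, and finally the tail segment is iterated message by message
    // to locate the exact lsn/lid where new writes may resume.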
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn | (&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
            if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
            if expected_prev != actual_prev {
// detected a tear, everything after
error!(
"detected corruption during recovery for segment at {}! \
expected prev lid: {} actual: {} in last chain {:?}",
lid,
expected_prev,
| clean_tail_tears | identifier_name |
segment_accountant.rs | self.lsn = Some(new_lsn);
self.state = Active;
}
/// Transitions a segment to being in the Inactive state.
/// Called in:
///
/// PageCache::advance_snapshot for marking when a
/// segment has been completely read
///
    /// SegmentAccountant::recover for when a non-empty segment is
    /// rediscovered on disk during startup
pub fn active_to_inactive(&mut self, lsn: Lsn, from_recovery: bool) {
trace!("setting Segment with lsn {:?} to Inactive", self.lsn());
assert_eq!(self.state, Active);
if from_recovery {
assert!(lsn >= self.lsn());
} else {
assert_eq!(self.lsn.unwrap(), lsn);
}
self.state = Inactive;
// now we can push any deferred removals to the removed set
let deferred = mem::replace(&mut self.deferred_remove, HashSet::new());
for pid in deferred {
self.remove_pid(pid, lsn);
}
}
pub fn inactive_to_draining(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Draining", self.lsn());
assert_eq!(self.state, Inactive);
assert!(lsn >= self.lsn());
self.state = Draining;
}
pub fn draining_to_free(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Free", self.lsn());
assert!(self.is_draining());
assert!(lsn >= self.lsn());
self.present.clear();
self.removed.clear();
self.state = Free;
}
pub fn recovery_ensure_initialized(&mut self, lsn: Lsn) {
if let Some(current_lsn) = self.lsn {
            if current_lsn != lsn {
assert!(lsn > current_lsn);
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.state = Free;
self.free_to_active(lsn);
}
} else {
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.free_to_active(lsn);
}
}
fn lsn(&self) -> Lsn {
self.lsn.unwrap()
}
/// Add a pid to the Segment. The caller must provide
/// the Segment's LSN.
pub fn insert_pid(&mut self, pid: PageID, lsn: Lsn) {
assert_eq!(lsn, self.lsn.unwrap());
// if this breaks, we didn't implement the transition
// logic right in write_to_log, and maybe a thread is
// using the SA to add pids AFTER their calls to
// res.complete() worked.
assert_eq!(self.state, Active);
assert!(!self.removed.contains(&pid));
self.present.insert(pid);
}
/// Mark that a pid in this Segment has been relocated.
/// The caller must provide the LSN of the removal.
pub fn remove_pid(&mut self, pid: PageID, lsn: Lsn) {
// TODO this could be racy?
assert!(lsn >= self.lsn.unwrap());
match self.state {
Active => {
// we have received a removal before
// transferring this segment to Inactive, so
// we defer this pid's removal until the transfer.
self.deferred_remove.insert(pid);
}
Inactive | Draining => {
self.present.remove(&pid);
self.removed.insert(pid);
}
Free => panic!("remove_pid called on a Free Segment"),
}
}
fn live_pct(&self) -> f64 {
let total = self.present.len() + self.removed.len();
self.present.len() as f64 / total as f64
}
fn can_free(&self) -> bool {
self.state == Draining && self.is_empty()
}
pub fn is_empty(&self) -> bool {
self.present.is_empty()
}
}
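// The transitions above form one fixed cycle:
// Free -> Active -> Inactive -> Draining -> Free.
// A minimal sketch of that machine with a standalone enum (illustrative
// names, not the crate's own types), just to make the legal moves explicit:
#[derive(Debug, Clone, Copy, PartialEq)]
enum SketchState {
    Free,
    Active,
    Inactive,
    Draining,
}

impl SketchState {
    // Returns the successor state if the transition is legal, None otherwise.
    fn step(self, next: SketchState) -> Option<SketchState> {
        use SketchState::*;
        match (self, next) {
            (Free, Active)
            | (Active, Inactive)
            | (Inactive, Draining)
            | (Draining, Free) => Some(next),
            _ => None,
        }
    }
}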
impl SegmentAccountant {
pub fn new(config: Config) -> SegmentAccountant {
let mut ret = SegmentAccountant::default();
ret.config = config;
ret.scan_segment_lsns();
ret
}
/// Called from the `PageCache` recovery logic, this initializes the
/// `SegmentAccountant` based on recovered segment information.
pub fn initialize_from_segments(&mut self, mut segments: Vec<Segment>) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<LogID> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(_lsn, lid)| *lid)
.collect();
for (idx, ref mut segment) in segments.iter_mut().enumerate() {
if segment.lsn.is_none() {
continue;
}
let segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
let lsn = segment.lsn();
// populate free and to_clean based on how much of the segment is still live
if segment.is_empty() {
// can be reused immediately
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
if segment.state == Inactive {
segment.inactive_to_draining(lsn);
}
self.to_clean.remove(&segment_start);
trace!("pid {} freed @initialize_from_segments", segment_start);
if logical_tail.contains(&segment_start) {
// we depend on the invariant that the last segments
// always link together, so that we can detect torn
// segments during recovery.
self.ensure_safe_free_distance();
}
segment.draining_to_free(lsn);
if self.tip != segment_start &&
!self.free.lock().unwrap().contains(&segment_start)
{
// don't give out this segment twice
trace!(
"freeing segment {} from initialize_from_segments, tip: {}",
segment_start,
self.tip
);
self.free_segment(segment_start, true);
}
} else if segment.live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"setting segment {} to Draining from initialize_from_segments",
segment_start
);
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
segment.inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
self.free.lock().unwrap().retain(|&s| s != segment_start);
} else {
self.free.lock().unwrap().retain(|&s| s!= segment_start);
}
}
self.set_last_given();
if !segments.is_empty() {
trace!("initialized self.segments to {:?}", segments);
for (i, segment) in segments.into_iter().enumerate() {
// we should not forget about segments that we've added
// during the initial segment scan, but freed for being
// empty, as that's where we set an LSN for them.
self.segments[i] = segment;
}
} else {
// this is basically just for when we recover with a single
// empty-yet-initialized segment
debug!(
"pagecache recovered no segments so not initializing from any"
);
}
}
fn set_last_given(&mut self) {
let new_max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((_lsn, lid)) = new_max {
trace!("setting last_given to {}", lid);
self.last_given = lid;
}
}
// Mark a specific segment as being present at a particular
// file offset.
fn recover(&mut self, lsn: Lsn, lid: LogID) {
trace!("recovered segment lsn {} at lid {}", lsn, lid);
let io_buf_size = self.config.get_io_buf_size() as LogID;
let idx = self.lid_to_idx(lid);
assert!(!(lsn == 0 && lid != 0), "lsn 0 provided with non-zero lid");
if !self.segments[idx].is_empty() {
self.segments[idx].free_to_active(lsn);
let segment_lsn = lsn / io_buf_size * io_buf_size;
self.segments[idx].active_to_inactive(segment_lsn, true);
} else {
// this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
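// The `lsn / io_buf_size * io_buf_size` expression above is plain integer
// truncation: it rounds an arbitrary Lsn down to the base Lsn of the segment
// containing it. A self-contained sketch (the 8192-byte segment size is an
// illustrative assumption, not the configured default):
fn segment_base_lsn(lsn: u64, io_buf_size: u64) -> u64 {
    (lsn / io_buf_size) * io_buf_size
}

#[test]
fn segment_base_rounds_down() {
    let io_buf_size = 8192;
    assert_eq!(segment_base_lsn(0, io_buf_size), 0);
    assert_eq!(segment_base_lsn(8191, io_buf_size), 0);
    assert_eq!(segment_base_lsn(8192, io_buf_size), 8192);
    assert_eq!(segment_base_lsn(20_000, io_buf_size), 16_384);
}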
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
if !empty_tip {
// we found at least one later message, so advance the tip past the last one
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
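// The recovered Lsn above is the segment's base Lsn plus the intra-segment
// offset ("overhang") of the recovered LogID. A sketch of just that
// arithmetic, with made-up numbers:
fn sketch_recovered_lsn(base_lsn: u64, recovered_lid: u64, io_buf_size: u64) -> u64 {
    let overhang = recovered_lid % io_buf_size;
    base_lsn + overhang
}

#[test]
fn overhang_carries_over() {
    // A hypothetical 8192-byte segment whose base Lsn is 24_576, recovered
    // 100 bytes into its on-disk segment at lid 16_384.
    assert_eq!(sketch_recovered_lsn(24_576, 16_484, 8192), 24_676);
}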
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
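// In the non-recovery branch above, the lid is not freed directly: a guard
// object carries it, and only the guard's destruction (deferred by the epoch
// reclamation behind `pin`/`defer_drop`) pushes it onto the free queue. A
// sketch of that drop-guard idea using only std, with the epoch machinery
// elided:
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

struct SketchDropper(u64, Arc<Mutex<VecDeque<u64>>>);

impl Drop for SketchDropper {
    fn drop(&mut self) {
        // Runs only when the guard is dropped, i.e. strictly later.
        self.1.lock().unwrap().push_back(self.0);
    }
}

#[test]
fn freed_only_on_drop() {
    let free = Arc::new(Mutex::new(VecDeque::new()));
    {
        let _guard = SketchDropper(8192, free.clone());
        assert!(free.lock().unwrap().is_empty()); // not freed yet
    }
    assert_eq!(free.lock().unwrap().pop_front(), Some(8192)); // freed on drop
}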
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn clean_tail_tears(&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
if expected_prev != actual_prev {
// detected a tear; everything after this link is suspect
error!(
"detected corruption during recovery for segment at {}! \
| );
assert_eq!(self.state, Free);
self.present.clear();
self.removed.clear(); | random_line_split |
|
segment_accountant.rs | // this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
if !empty_tip {
// we found at least one later message, so advance the tip past the last one
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn clean_tail_tears(&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
if expected_prev != actual_prev {
// detected a tear; everything after this link is suspect
error!(
"detected corruption during recovery for segment at {}! \
expected prev lid: {} actual: {} in last chain {:?}",
lid,
expected_prev,
actual_prev,
logical_tail
);
tear_at = Some(i);
}
}
if let Some(i) = tear_at {
// we need to chop off the elements after the tear
for &(_lsn_to_chop, lid_to_chop) in &logical_tail[0..i] {
error!("clearing corrupted segment at lid {}", lid_to_chop);
self.free_segment(lid_to_chop, true);
// TODO write zeroes to these segments to reduce
// false recovery.
}
}
}
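// A pure-function sketch of the tear scan above: the tail is ordered newest
// first, each element pairs a lid with the `prev` pointer recorded in its
// header, and a mismatch against the next-older lid marks a tear.
fn find_tear(tail_newest_first: &[(u64, u64)]) -> Option<usize> {
    let mut tear_at = None;
    for i in 0..tail_newest_first.len().saturating_sub(1) {
        let (_lid, recorded_prev) = tail_newest_first[i];
        let expected_prev = tail_newest_first[i + 1].0;
        if recorded_prev != expected_prev {
            tear_at = Some(i);
        }
    }
    tear_at
}

#[test]
fn detects_broken_link() {
    // Hypothetical 3-segment tail 24_576 <- 16_384 <- 8_192 (newest first),
    // where the newest header records a bogus prev pointer.
    let tail = [(24_576, 99), (16_384, 8_192), (8_192, 0)];
    assert_eq!(find_tear(&tail), Some(0));
}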
pub fn recovered_lid(&self) -> LogID {
self.recovered_lid
}
pub fn recovered_lsn(&self) -> Lsn {
self.recovered_lsn
}
/// Causes all new allocations to occur at the end of the file, which
/// is necessary to preserve consistency while concurrently iterating through
/// the log during snapshot creation.
pub fn pause_rewriting(&mut self) {
self.pause_rewriting = true;
}
/// Re-enables segment rewriting after iteration is complete.
pub fn resume_rewriting(&mut self) {
self.pause_rewriting = false;
}
/// Called by the `PageCache` when a page has been rewritten completely.
/// We mark all of the old segments that contained the previous state
/// from the page, and if the old segments are empty or clear enough to
/// begin accelerated cleaning we mark them as so.
pub fn mark_replace(
&mut self,
pid: PageID,
lsn: Lsn,
old_lids: Vec<LogID>,
new_lid: LogID,
) {
trace!("mark_replace pid {} at lid {} with lsn {}", pid, new_lid, lsn);
self.pending_clean.remove(&pid);
let new_idx = new_lid as usize / self.config.get_io_buf_size();
// make sure we're not actively trying to replace the destination
let new_segment_start = new_idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
for old_lid in old_lids {
let old_idx = self.lid_to_idx(old_lid);
let segment_start = (old_idx * self.config.get_io_buf_size()) as
LogID;
if new_idx == old_idx {
// we probably haven't flushed this segment yet, so don't
// mark the pid as being removed from it
continue;
}
if self.segments[old_idx].lsn() > lsn {
// has been replaced after this call already,
// quite a big race happened
// TODO think about how this happens with our segment delay
continue;
}
if self.segments[old_idx].state == Free {
// this segment is already reused
// TODO should this be a panic?
continue;
}
self.segments[old_idx].remove_pid(pid, lsn);
if self.segments[old_idx].can_free() {
// can be reused immediately
self.segments[old_idx].draining_to_free(lsn);
self.to_clean.remove(&segment_start);
trace!("freed segment {} in replace", segment_start);
self.free_segment(segment_start, false);
} else if self.segments[old_idx].is_inactive() &&
self.segments[old_idx].live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"SA inserting {} into to_clean from mark_replace",
segment_start
);
self.segments[old_idx].inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
}
}
self.mark_link(pid, lsn, new_lid);
}
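// Whether an Inactive segment moves to Draining hinges on `live_pct` against
// the configured cleanup threshold. A sketch of just that predicate (the 0.5
// threshold below is illustrative, not the actual default):
fn should_drain(present: usize, removed: usize, threshold: f64) -> bool {
    let total = present + removed;
    if total == 0 {
        return false; // empty segments are freed instead
    }
    present as f64 / total as f64 <= threshold
}

#[test]
fn mostly_dead_segments_drain() {
    assert!(should_drain(2, 8, 0.5)); // 20% live: worth cleaning
    assert!(!should_drain(8, 2, 0.5)); // 80% live: leave it alone
}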
/// Called by the `PageCache` to find useful pages
/// it should try to rewrite.
pub fn clean(&mut self, ignore: Option<PageID>) -> Option<PageID> {
// try to maintain about twice the number of necessary
// on-deck segments, to reduce the amount of log growth.
if self.free.lock().unwrap().len() >=
self.config.get_min_free_segments() * 2
{
return None;
}
let to_clean = self.to_clean.clone();
for lid in to_clean {
let idx = self.lid_to_idx(lid);
let segment = &self.segments[idx];
assert_eq!(segment.state, Draining);
for pid in &segment.present {
if self.pending_clean.contains(pid) || ignore == Some(*pid) {
continue;
}
self.pending_clean.insert(*pid);
trace!(
"telling caller to clean {} from segment at {}",
pid,
lid,
);
return Some(*pid);
}
}
None
}
/// Called from `PageCache` when some state has been added
/// to a logical page at a particular offset. We ensure the
/// page is present in the segment's page set.
pub fn mark_link(&mut self, pid: PageID, lsn: Lsn, lid: LogID) {
trace!("mark_link pid {} at lid {}", pid, lid);
self.pending_clean.remove(&pid);
let idx = self.lid_to_idx(lid);
// make sure we're not actively trying to replace the destination
let new_segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
let segment = &mut self.segments[idx];
if segment.lsn() > lsn {
// a race happened, and our Lsn does not apply anymore
// TODO think about how this happens with segment delay
return;
}
let segment_lsn = lsn / self.config.get_io_buf_size() as Lsn *
self.config.get_io_buf_size() as Lsn;
segment.insert_pid(pid, segment_lsn);
}
/// Called after the trailer of a segment has been written to disk,
/// indicating that no more pids will be added to a segment. Moves
/// the segment into the Inactive state.
///
/// # Panics
/// The provided lsn and lid must exactly match the existing segment.
pub fn deactivate_segment(&mut self, lsn: Lsn, lid: LogID) {
let idx = self.lid_to_idx(lid);
self.segments[idx].active_to_inactive(lsn, false);
}
fn bump_tip(&mut self) -> LogID {
let lid = self.tip;
self.tip += self.config.get_io_buf_size() as LogID;
trace!("advancing file tip from {} to {}", lid, self.tip);
lid
}
fn ensure_safe_free_distance(&mut self) {
// NB we must maintain a queue of free segments that
// is at least as long as the number of io buffers.
// This is so that we will never give out a segment
// that has been placed on the free queue after its
// contained pages have all had updates added to an
// IO buffer during a PageCache replace, but whose
// replacing updates have not actually landed on disk
// yet. If updates always have to wait in a queue
// at least as long as the number of IO buffers, it
// guarantees that the old updates are actually safe
// somewhere else first. Note that we push_front here
// so that the log tip is used first.
while self.free.lock().unwrap().len() < self.config.get_io_bufs() {
let new_lid = self.bump_tip();
trace!(
"pushing segment {} to free from ensure_safe_free_distance",
new_lid
);
self.free.lock().unwrap().push_front(new_lid);
}
}
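// A sketch of the top-up loop above, with a closure standing in for
// `bump_tip`: the free queue is grown until it holds at least one entry per
// IO buffer, which is what keeps recently-freed segments out of circulation.
use std::collections::VecDeque;

fn top_up_free(free: &mut VecDeque<u64>, io_bufs: usize, mut bump_tip: impl FnMut() -> u64) {
    while free.len() < io_bufs {
        let lid = bump_tip();
        free.push_front(lid);
    }
}

#[test]
fn keeps_minimum_distance() {
    let mut free = VecDeque::new();
    let mut tip = 0u64;
    top_up_free(&mut free, 3, || {
        let lid = tip;
        tip += 8192; // illustrative segment size
        lid
    });
    assert_eq!(free.len(), 3);
}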
/// Returns the next offset to write a new segment in,
/// as well as the offset of the previous segment that
/// was allocated, so that we can detect missing
/// out-of-order segments during recovery.
pub fn next(&mut self, lsn: Lsn) -> (LogID, LogID) {
assert_eq!(
lsn % self.config.get_io_buf_size() as Lsn,
0,
"unaligned Lsn provided to next!"
);
// pop free or add to end
let lid = if self.pause_rewriting {
self.bump_tip()
} else {
let res = self.free.lock().unwrap().pop_front();
if res.is_none() {
self.bump_tip()
} else {
res.unwrap()
}
};
let last_given = self.last_given;
// pin lsn to this segment
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
// remove the ordering from our list
if let Some(old_lsn) = self.segments[idx].lsn | {
self.ordering.remove(&old_lsn);
} | conditional_block |
|
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion instances with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true, otherwise replaces the history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history if append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
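// The two loops above de-duplicate while keeping the *newest* occurrence of
// each line: entries are popped newest-first, older duplicates are retained
// away, then the survivors are restored in their original order. The same
// idea as a standalone sketch over plain strings:
use std::collections::VecDeque;

fn dedup_keep_newest(buf: &mut VecDeque<String>) {
    let mut tmp: Vec<String> = Vec::with_capacity(buf.len());
    while let Some(line) = buf.pop_back() {
        buf.retain(|existing| *existing != line);
        tmp.push(line);
    }
    while let Some(line) = tmp.pop() {
        buf.push_back(line);
    }
}

#[test]
fn keeps_last_occurrence() {
    let mut buf: VecDeque<String> =
        ["ls", "cd /", "ls", "pwd"].iter().map(|s| s.to_string()).collect();
    dedup_keep_newest(&mut buf);
    let out: Vec<String> = buf.into_iter().collect();
    assert_eq!(out, vec!["cd /", "ls", "pwd"]);
}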
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history. | pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes, "compact" the history file by rewriting it from the
// in-memory history. This keeps the history file clean and at a reasonable
// size (not much over the max history size at its worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
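// Note the ordering this produces: entries that merely *contain* the term
// come first, and entries that *start with* it are appended afterwards, so
// the closest matches sit at the end. A compact sketch of that split:
fn subset_order(items: &[&str], term: &str) -> Vec<usize> {
    let mut starts = Vec::new();
    let mut ret: Vec<usize> = (0..items.len())
        .filter(|&i| {
            let tested = items[i];
            if tested.starts_with(term) {
                starts.push(i);
            }
            tested.contains(term) && !tested.starts_with(term) && tested != term
        })
        .collect();
    ret.append(&mut starts);
    ret
}

#[test]
fn contains_first_then_prefix_matches() {
    let items = ["git status", "cargo git", "git push"];
    assert_eq!(subset_order(&items, "git"), vec![1, 0, 2]);
}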
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
} | #[inline(always)] | random_line_split |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion instances with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true, otherwise replaces the history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history if append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> | // Every 30 writes "compact" the history file by writing just in memory history. This
// is to keep the history file clean and at a reasonable size (not much over max
// history size at its worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
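// The same reverse scan over plain strings: indices below the cursor are
// walked newest-first and the first prefix match wins.
fn newest_match(entries: &[&str], below: usize, prefix: &str) -> Option<usize> {
    (0..below).rev().find(|&i| entries[i].starts_with(prefix))
}

#[test]
fn finds_latest_prefix_match() {
    let entries = ["make test", "make build", "ls", "make run"];
    assert_eq!(newest_match(&entries, entries.len(), "make"), Some(3));
    assert_eq!(newest_match(&entries, 3, "make"), Some(1)); // cursor at index 3
}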
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
| {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1; | identifier_body |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion instances with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true, otherwise replaces the history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history if append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes, "compact" the history file by rewriting it from the
// in-memory history. This keeps the history file clean and at a reasonable
// size (not much over the max history size at its worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
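// With `inc_append` enabled, each append above grows the tracked file size by
// the command's byte length plus one for the trailing newline, letting a later
// `load_history` skip a reload when nothing else touched the file. The
// bookkeeping in isolation:
fn next_file_size(current: u64, item: &str) -> u64 {
    current + item.len() as u64 + 1 // command bytes plus the '\n' terminator
}

#[test]
fn tracks_appended_bytes() {
    assert_eq!(next_file_size(0, "ls"), 3);
    assert_eq!(next_file_size(3, "pwd"), 7);
}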
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn | (self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
| into_iter | identifier_name |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion instances with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the
/// history if append is true, otherwise replaces the history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history if append is true
/// (replaces if false). Only loads if length is not equal to the current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case: trying to load nothing and not appending - just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from the loaded history if we do not want them.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Used primarily when inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from the loaded history if we do not want them.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
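/// # Example
/// A sketch (the path is illustrative):
/// ```rust,ignore
/// let mut history = History::new();
/// // Creates the file if missing, otherwise loads it and records its size.
/// let loaded_bytes = history.set_file_name_and_load_history("/tmp/.my_history").unwrap();
/// ```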
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
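/// # Example
/// A sketch (`Buffer::from(String)` is used elsewhere in this file):
/// ```rust,ignore
/// let mut history = History::new();
/// history.push(Buffer::from("ls -la".to_string())).unwrap();
/// // With `append_duplicate_entries == false` (the default), an immediate
/// // repeat of the same command is dropped.
/// history.push(Buffer::from("ls -la".to_string())).unwrap();
/// assert_eq!(history.len(), 1);
/// ```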
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// We do not want duplicates, so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes, "compact" the history file by writing just the in-memory
// history. This keeps the history file clean and at a reasonable size (not
// much over max history size at its worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone; if it is not right, trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does not wrap.
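/// # Example
/// A sketch with hypothetical contents (`Buffer::from(String)` appears
/// elsewhere in this file):
/// ```rust,ignore
/// // history: ["git status", "cargo build", "git push"]
/// let needle = Buffer::from("git".to_string());
/// assert_eq!(history.get_newest_match(None, &needle), Some(2));    // "git push"
/// // Continue the search among entries older than index 2:
/// assert_eq!(history.get_newest_match(Some(2), &needle), Some(0)); // "git status"
/// ```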
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
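/// Returns the indices of entries matching `search_term`: entries that contain
/// the term without starting with it (and are not equal to it) come first,
/// followed by all entries that start with it.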
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
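/// Returns the indices of all entries that contain `search_term`, oldest first.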
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
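/// Creates a window with the given geometry and attributes, checked for errors.
/// # Example
/// A minimal sketch (values are illustrative and `str` is assumed to implement
/// `UnivString` for the turbofish; `None` for depth/parent/visual falls back to
/// `XCB_COPY_FROM_PARENT` and the first root screen):
/// ```rust,ignore
/// let con = Connection::new::<str>(None).expect("cannot connect to X server");
/// let w = con.new_window_id();
/// let mut values = WindowValueList::new();
/// values.back_pixel(0xFF_FFFF).eventmask(XCB_EVENT_MASK_EXPOSURE);
/// con.create_window(None, &w, None, 0, 0, 640, 480, 0,
///     WindowIOClass::InputOutput, None, &values).unwrap();
/// con.map_window(&w).unwrap();
/// con.flush();
/// ```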
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1.0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1.0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
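/// A typed view over a `GenericEvent`, keyed by the X11 response code.
/// # Example
/// An event-loop sketch, given a `con: Connection` and a hypothetical
/// `wm_delete_window` atom obtained via `Connection::intern`; the unchecked
/// cast is why `from_ref` is unsafe:
/// ```rust,ignore
/// while let Some(ev) = con.wait_event() {
///     if ev.response_type() == ClientMessageEvent::RESPONSE_ENUM {
///         let cm = unsafe { ClientMessageEvent::from_ref(&ev) };
///         if cm.data_as_u32() == wm_delete_window { break; }
///     }
/// }
/// ```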
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
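/// Replaces `property` on this window with `value`, using `XCB_PROP_MODE_REPLACE`.
/// # Example
/// A sketch, given a `con: Connection` and a `window: Window` already in scope:
/// set the window title, then advertise a delete-window protocol (the atom
/// names are standard ICCCM strings; error handling elided):
/// ```rust,ignore
/// window.replace_property(&con, XCB_ATOM_WM_NAME, "my window");
/// let wm_protocols = con.intern("WM_PROTOCOLS").reply().unwrap();
/// let wm_delete = con.intern("WM_DELETE_WINDOW").reply().unwrap();
/// window.replace_property(&con, wm_protocols, &wm_delete);
/// con.flush();
/// ```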
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1.0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1.0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
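/// Iterates the visual types available at this depth.
/// # Example
/// Finding a 32-bit true-color visual ID (a sketch; `screen` would come from
/// `Setup::iter_roots`):
/// ```rust,ignore
/// let visual: Option<VisualID> = screen.iter_allowed_depths()
///     .filter(|d| d.depth() == 32)
///     .flat_map(|d| d.iter_visuals())
///     .find(|v| v.is_truecolor())
///     .map(|v| v.id());
/// ```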
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}