// packfile.rs

use bytes::{BufMut, BytesMut};
use flate2::{write::ZlibEncoder, Compression};
use sha1::{
digest::{generic_array::GenericArray, FixedOutputDirty},
Digest, Sha1,
};
use std::{convert::TryInto, fmt::Write, io::Write as IoWrite};
// The packfile itself is a very simple format. There is a header, a
// series of packed objects (each with its own header and body) and
// then a checksum trailer. The first four bytes are the string 'PACK',
// which is used as a sanity check that you're reading the start of the
// packfile correctly. This is followed by a 4-byte packfile version
// number and then a 4-byte number of entries in that file.
pub struct PackFile<'a> {
entries: Vec<PackFileEntry<'a>>,
}
impl<'a> PackFile<'a> {
#[must_use]
pub fn new(entries: Vec<PackFileEntry<'a>>) -> Self {
Self { entries }
}
#[must_use]
pub const fn header_size() -> usize {
"PACK".len() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>()
}
#[must_use]
pub const fn footer_size() -> usize {
        20 // length of the SHA-1 checksum trailer
}
pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> {
let mut buf = original_buf.split_off(original_buf.len());
buf.reserve(Self::header_size() + Self::footer_size());
// header
buf.extend_from_slice(b"PACK"); // magic header
buf.put_u32(2); // version
buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile
// body
for entry in &self.entries {
entry.encode_to(&mut buf)?;
}
// footer
buf.extend_from_slice(&sha1::Sha1::digest(&buf[..]));
original_buf.unsplit(buf);
Ok(())
}
}
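// A minimal sketch of the layout described above, using only the types in
// this file. The blob contents are illustrative; `put_u32` writes big-endian,
// which is why the version and entry count are compared against `to_be_bytes`.
#[cfg(test)]
mod packfile_layout_tests {
    use super::*;

    #[test]
    fn header_has_magic_version_and_entry_count() {
        let pack = PackFile::new(vec![PackFileEntry::Blob(&b"hello"[..])]);
        let mut buf = BytesMut::new();
        pack.encode_to(&mut buf).unwrap();
        assert_eq!(&buf[..4], &b"PACK"[..]); // magic
        assert_eq!(&buf[4..8], &2_u32.to_be_bytes()[..]); // version
        assert_eq!(&buf[8..12], &1_u32.to_be_bytes()[..]); // entry count
        // everything after the entries is the 20-byte SHA-1 trailer
        assert!(buf.len() > PackFile::header_size() + PackFile::footer_size());
    }
}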
#[derive(Debug)]
pub struct Commit<'a> {
pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray
// pub parent: [u8; 20],
pub author: CommitUserInfo<'a>,
pub committer: CommitUserInfo<'a>,
// pub gpgsig: &str,
pub message: &'a str,
}
impl Commit<'_> {
fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> {
let mut tree_hex = [0_u8; 20 * 2];
hex::encode_to_slice(self.tree, &mut tree_hex)?;
out.write_str("tree ")?;
out.extend_from_slice(&tree_hex);
out.write_char('\n')?;
writeln!(out, "author {}", self.author.encode())?;
writeln!(out, "committer {}", self.committer.encode())?;
write!(out, "\n{}", self.message)?;
Ok(())
}
#[must_use]
pub fn size(&self) -> usize {
let mut len = 0;
len += "tree ".len() + (self.tree.len() * 2) + "\n".len();
len += "author ".len() + self.author.size() + "\n".len();
len += "committer ".len() + self.committer.size() + "\n".len();
len += "\n".len() + self.message.len();
len
}
}
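// A sketch checking that `Commit::size` agrees byte-for-byte with what
// `encode_to` writes; all field values below are made up for illustration.
#[cfg(test)]
mod commit_tests {
    use super::*;

    #[test]
    fn size_matches_encoded_length() {
        let user = CommitUserInfo {
            name: "Example Author",
            email: "author@example.com",
            time: chrono::Utc::now(),
        };
        let commit = Commit {
            tree: sha1::Sha1::digest(b"illustrative tree bytes"),
            author: user,
            committer: user,
            message: "test commit",
        };
        let mut out = BytesMut::new();
        commit.encode_to(&mut out).unwrap();
        assert_eq!(out.len(), commit.size());
    }
}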
#[derive(Copy, Clone, Debug)]
pub struct CommitUserInfo<'a> {
pub name: &'a str,
pub email: &'a str,
pub time: chrono::DateTime<chrono::Utc>,
}
impl CommitUserInfo<'_> {
fn encode(&self) -> String {
// TODO: remove `format!`, `format_args!`?
format!(
"{} <{}> {} +0000",
self.name,
self.email,
self.time.timestamp()
)
}
#[must_use]
pub fn size(&self) -> usize {
let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len();
self.name.len()
+ "< ".len()
+ self.email.len()
+ "> ".len()
+ timestamp_len
+ " +0000".len()
}
}
#[derive(Debug)]
pub enum TreeItemKind {
File,
Directory,
}
impl TreeItemKind {
#[must_use]
pub const fn mode(&self) -> &'static str {
match self {
Self::File => "100644",
Self::Directory => "40000",
}
}
}
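// For reference, git trees also use `100755` (executable file) and `120000`
// (symlink) modes; this enum only models the two variants needed here.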
#[derive(Debug)]
pub struct TreeItem<'a> {
pub kind: TreeItemKind,
pub name: &'a str,
pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays
}
// `[mode] [name]\0[hash]`
impl TreeItem<'_> {
fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> {
out.write_str(self.kind.mode())?;
write!(out, " {}\0", self.name)?;
out.extend_from_slice(&self.hash);
Ok(())
}
#[must_use]
pub fn size(&self) -> usize {
self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len()
}
}
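// A sketch of the `[mode] [name]\0[hash]` layout documented above, with an
// illustrative name and hash.
#[cfg(test)]
mod tree_item_tests {
    use super::*;

    #[test]
    fn encodes_mode_name_and_raw_hash() {
        let item = TreeItem {
            kind: TreeItemKind::File,
            name: "README",
            hash: sha1::Sha1::digest(b"illustrative"),
        };
        let mut out = BytesMut::new();
        item.encode_to(&mut out).unwrap();
        assert!(out.starts_with(b"100644 README\0"));
        assert_eq!(out.len(), item.size());
    }
}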
#[derive(Debug)]
pub enum PackFileEntry<'a> {
// jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc
// commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c
// parent c2a862612a14346ae95234f26efae1ee69b5b7a9
// author Jordan Doyle <[email protected]> 1630244577 +0100
// committer Jordan Doyle <[email protected]> 1630244577 +0100
// gpgsig -----BEGIN PGP SIGNATURE-----
//
// iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt
// xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2
// tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ
// tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6
// omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX
// fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E
// UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO
// FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G
// hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG
// QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE
// Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf
// bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y=
// =fXoH
// -----END PGP SIGNATURE-----
//
// test
Commit(Commit<'a>),
// jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc
// tree 20940000.cargo���CYy��Ve�������100644.gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/��
// kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut�
Tree(Vec<TreeItem<'a>>),
// jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc
// blob 23try and find me in.git
Blob(&'a [u8]),
// Tag,
// OfsDelta,
// RefDelta,
}
impl PackFileEntry<'_> {
fn write_header(&self, buf: &mut BytesMut) {
let mut size = self.uncompressed_size();
// write header
{
            // the MSB marks a continuation byte; only set it when the size
            // doesn't fit in the 4 bits available in this first byte
            let mut val = if size >> 4 == 0 { 0 } else { 0b1000_0000_u8 };
val |= match self {
Self::Commit(_) => 0b001,
Self::Tree(_) => 0b010,
Self::Blob(_) => 0b011,
// Self::Tag => 0b100,
// Self::OfsDelta => 0b110,
// Self::RefDelta => 0b111,
} << 4;
// pack the 4 LSBs of the size into the header
#[allow(clippy::cast_possible_truncation)] // value is masked
{
val |= (size & 0b1111) as u8;
}
size >>= 4;
buf.put_u8(val);
}
// write size bytes
        while size != 0 {
// read 7 LSBs from the `size` and push them off for the next iteration
#[allow(clippy::cast_possible_truncation)] // value is masked
let mut val = (size & 0b111_1111) as u8;
size >>= 7;
            if size != 0 {
// MSB set to 1 implies there's more size bytes to come, otherwise
// the data starts after this byte
val |= 1 << 7;
}
buf.put_u8(val);
}
}
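    // Worked example for `write_header` above (illustrative): a blob of 300
    // bytes. 300 = 0b1_0010_1100, so the first byte packs the continuation
    // MSB, the blob type (0b011), and the low 4 size bits (0b1100):
    // 0b1011_1100 = 0xBC. The remaining bits (300 >> 4 = 18 = 0b001_0010) fit
    // in one final byte with a clear MSB: 0x12. The encoded header is
    // therefore [0xBC, 0x12].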
pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> {
self.write_header(original_out); // TODO: this needs space reserving for it
// todo is there a way to stream through the zlibencoder so we don't have to
// have this intermediate bytesmut and vec?
let mut out = BytesMut::new();
let size = self.uncompressed_size();
original_out.reserve(size);
// the data ends up getting compressed but we'll need at least this many bytes
out.reserve(size);
match self {
Self::Commit(commit) => {
commit.encode_to(&mut out)?;
}
Self::Tree(items) => {
for item in items {
item.encode_to(&mut out)?;
}
}
Self::Blob(data) => {
out.extend_from_slice(data);
}
}
debug_assert_eq!(out.len(), size);
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(&out)?;
let compressed_data = e.finish()?;
original_out.extend_from_slice(&compressed_data);
Ok(())
}
#[must_use]
pub fn uncompressed_size(&self) -> usize {
match self {
Self::Commit(commit) => commit.size(),
Self::Tree(items) => items.iter().map(TreeItem::size).sum(),
Self::Blob(data) => data.len(),
}
}
// wen const generics for RustCrypto? :-(
pub fn hash(
&self,
) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> {
let size = self.uncompressed_size();
let file_prefix = match self {
Self::Commit(_) => "commit",
Self::Tree(_) => "tree",
Self::Blob(_) => "blob",
};
let size_len = itoa::Buffer::new().format(size).len();
let mut out =
            BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\0".len() + size);
write!(out, "{} {}\0", file_prefix, size)?;
match self {
Self::Commit(commit) => {
commit.encode_to(&mut out)?;
}
Self::Tree(items) => {
                for item in items {
                    item.encode_to(&mut out)?;
}
}
Self::Blob(blob) => {
out.extend_from_slice(blob);
}
}
Ok(sha1::Sha1::digest(&out))
}
}
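// A sketch checking `hash` against git itself. The expected digest is the
// well-known `git hash-object` result for the string "test content\n" (the
// Pro Git book example); treat the constant as illustrative.
#[cfg(test)]
mod hash_tests {
    use super::*;

    #[test]
    fn blob_hash_matches_git_hash_object() {
        let entry = PackFileEntry::Blob(&b"test content\n"[..]);
        let hash = entry.hash().unwrap();
        assert_eq!(
            hex::encode(hash),
            "d670460b4b4aece5915caf5c68d12f560a9fe3e4"
        );
    }
}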
// poll.rs

extern crate nix;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::time;
use std::io;
use self::nix::sys::epoll;
/// Polls for readiness events on all registered file descriptors.
///
/// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or
/// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor
/// is considered ready if it is possible to immediately perform a corresponding operation; e.g.
/// [`read`].
///
/// These `Poll` instances are optimized for a worker pool use-case, and so they are all
/// oneshot, edge-triggered, and only support "ready to read".
///
/// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the
/// [`register`] method. A `Token` is also passed to the [`register`] function, and that same
/// `Token` is returned when the given file descriptor is ready.
///
/// [`read`]: tcp/struct.TcpStream.html#method.read
/// [`register`]: #method.register
/// [`reregister`]: #method.reregister
///
/// # Examples
///
/// A basic example -- establishing a `TcpStream` connection.
///
/// ```no_run
/// # extern crate mio;
/// # extern crate mio_pool;
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Events, Poll, Token};
/// use mio::net::TcpStream;
///
/// use std::net::{TcpListener, SocketAddr};
///
/// // Bind a server socket to connect to.
/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
/// let server = TcpListener::bind(&addr)?;
///
/// // Construct a new `Poll` handle as well as the `Events` we'll store into
/// let poll = Poll::new()?;
/// let mut events = Events::with_capacity(1024);
///
/// // Connect the stream
/// let stream = TcpStream::connect(&server.local_addr()?)?;
///
/// // Register the stream with `Poll`
/// poll.register(&stream, Token(0))?;
///
/// // Wait for the socket to become ready. This has to happen in a loop to
/// // handle spurious wakeups.
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for Token(t) in &events {
/// if t == 0 {
/// // The socket connected (probably; it could be a spurious wakeup)
/// return Ok(());
/// }
/// }
/// }
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// #     try_main().unwrap();
/// # }
/// ```
///
/// # Exclusive access
///
/// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file
/// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued
/// for a given descriptor, no more events will be issued for that descriptor until it has been
/// re-registered using [`reregister`].
pub struct Poll(RawFd);
/// Associates an event with a file descriptor.
///
/// `Token` is a wrapper around `usize`, and is used as an argument to
/// [`Poll::register`] and [`Poll::reregister`].
///
/// See [`Poll`] for more documentation on polling. You will likely want to use something like
/// [`slab`] for creating and managing these.
///
/// [`Poll`]: struct.Poll.html
/// [`Poll::register`]: struct.Poll.html#method.register
/// [`Poll::reregister`]: struct.Poll.html#method.reregister
/// [`slab`]: https://crates.io/crates/slab
pub struct Token(pub usize);
/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received
/// since the last poll. Usually, a single `Events` instance is created at the same time as a
/// [`Poll`] and reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: struct.Poll.html#method.poll
/// [`Poll`]: struct.Poll.html
pub struct Events {
all: Vec<epoll::EpollEvent>,
/// How many of the events in `.all` are filled with responses to the last `poll()`?
current: usize,
}
impl Events {
/// Return a new `Events` capable of holding up to `capacity` events.
pub fn with_capacity(capacity: usize) -> Events {
let mut events = Vec::new();
events.resize(capacity, epoll::EpollEvent::empty());
Events {
all: events,
current: 0,
}
}
}
fn nix_to_io_err(e: nix::Error) -> io::Error {
match e {
nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e),
}
}
impl Poll {
/// Return a new `Poll` handle.
///
/// This function will make a syscall to the operating system to create the system selector. If
/// this syscall fails, `Poll::new` will return with the error.
///
/// See [struct] level docs for more details.
///
/// [struct]: struct.Poll.html
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Poll, Events};
/// use std::time::Duration;
///
/// let poll = match Poll::new() {
/// Ok(poll) => poll,
/// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
/// };
///
/// // Create a structure to receive polled events
/// let mut events = Events::with_capacity(1024);
///
/// // Wait for events, but none will be received because no `Evented`
/// // handles have been registered with this `Poll` instance.
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
/// assert_eq!(n, 0);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn new() -> io::Result<Self> {
epoll::epoll_create1(epoll::EpollCreateFlags::empty())
.map(Poll)
.map_err(nix_to_io_err)
}
fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> {
let mut event = epoll::EpollEvent::new(
epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT,
t.0 as u64,
);
epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err)
}
/// Register a file descriptor with this `Poll` instance.
///
/// Once registered, the `Poll` instance monitors the given descriptor for readiness state
/// changes. When it notices a state change, it will return a readiness event for the handle
/// the next time [`poll`] is called.
///
/// See the [`struct`] docs for a high level overview.
///
/// `token` is a user-defined value that is associated with the given `file`. When [`poll`]
/// returns an event for `file`, this token is included. This allows the caller to map the
/// event back to its descriptor. The token associated with a file descriptor can be changed at
/// any time by calling [`reregister`].
pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlAdd)
}
/// Re-register a file descriptor with this `Poll` instance.
///
/// When you re-register a file descriptor, you can change the details of the registration.
/// Specifically, you can update the `token` specified in previous `register` and `reregister`
/// calls.
///
/// See the [`register`] documentation for details about the function
/// arguments and see the [`struct`] docs for a high level overview of
/// polling.
///
/// [`struct`]: #
/// [`register`]: #method.register
pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlMod)
}
/// Deregister a file descriptor from this `Poll` instance.
///
/// When you deregister a file descriptor, it will no longer be monitored for readiness events,
/// and it will no longer produce events from `poll`.
pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> {
epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None)
.map_err(nix_to_io_err)
}
/// Wait for events on file descriptors associated with this `Poll` instance.
///
/// Blocks the current thread and waits for events for any of the file descriptors that are
/// registered with this `Poll` instance. The function blocks until either at least one
/// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means
/// that `poll` blocks until a readiness event has been received.
///
/// The supplied `events` will be cleared and newly received readiness events will be pushed
/// onto the end. At most `events.capacity()` events will be returned. If there are further
/// pending readiness events, they are returned on the next call to `poll`.
///
/// Note that once an event has been issued for a given `token` (or rather, for the token's
/// file descriptor), no further events will be issued for that descriptor until it has been
/// re-registered. Note also that the `timeout` is rounded up to the system clock granularity
/// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun
/// by a small amount.
///
/// `poll` returns the number of events that have been pushed into `events`, or `Err` when an
/// error has been encountered with the system selector.
///
/// See the [struct] level documentation for a higher level discussion of polling.
///
/// [struct]: #
pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> {
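        // `epoll_wait` takes a millisecond timeout, with -1 meaning "block
        // indefinitely"; any sub-millisecond remainder of `timeout` is
        // truncated by this conversion.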
let timeout = match timeout {
None => -1,
Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize,
};
events.current =
epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?;
Ok(events.current)
}
}
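// A sketch of the oneshot workflow described above (not compiled here):
// after an event fires for a descriptor, `Poll` stays silent about it until
// `reregister` re-arms it. `listener` and `handle_ready` are hypothetical.
//
//     let poll = Poll::new()?;
//     poll.register(&listener, Token(0))?;
//     let mut events = Events::with_capacity(64);
//     loop {
//         poll.poll(&mut events, None)?;
//         for Token(t) in &events {
//             handle_ready(t); // hypothetical per-connection handler
//             poll.reregister(&listener, Token(t))?; // re-arm the oneshot
//         }
//     }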
/// [`Events`] iterator.
///
/// This struct is created by the `into_iter` method on [`Events`].
///
/// [`Events`]: struct.Events.html
pub struct EventsIterator<'a> {
events: &'a Events,
at: usize,
}
impl<'a> IntoIterator for &'a Events {
type IntoIter = EventsIterator<'a>;
type Item = Token;
fn into_iter(self) -> Self::IntoIter {
EventsIterator {
events: self,
at: 0,
}
}
}
impl<'a> Iterator for EventsIterator<'a> {
type Item = Token;
fn next(&mut self) -> Option<Self::Item> {
let at = &mut self.at;
if *at >= self.events.current {
            // events at or beyond `current` are stale from an earlier poll
return None;
}
self.events.all.get(*at).map(|e| {
*at += 1;
Token(e.data() as usize)
})
    }
}
} | /// # fn main() {
/// # try_main().unwrap(); | random_line_split |
poll.rs | extern crate nix;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::time;
use std::io;
use self::nix::sys::epoll;
/// Polls for readiness events on all registered file descriptors.
///
/// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or
/// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor
/// is considered ready if it is possible to immediately perform a corresponding operation; e.g.
/// [`read`].
///
/// These `Poll` instances are optimized for a worker pool use-case, and so they are all
/// oneshot, edge-triggered, and only support "ready to read".
///
/// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the
/// [`register`] method. A `Token` is also passed to the [`register`] function, and that same
/// `Token` is returned when the given file descriptor is ready.
///
/// [`read`]: tcp/struct.TcpStream.html#method.read
/// [`register`]: #method.register
/// [`reregister`]: #method.reregister
///
/// # Examples
///
/// A basic example -- establishing a `TcpStream` connection.
///
/// ```no_run
/// # extern crate mio;
/// # extern crate mio_pool;
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Events, Poll, Token};
/// use mio::net::TcpStream;
///
/// use std::net::{TcpListener, SocketAddr};
///
/// // Bind a server socket to connect to.
/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
/// let server = TcpListener::bind(&addr)?;
///
/// // Construct a new `Poll` handle as well as the `Events` we'll store into
/// let poll = Poll::new()?;
/// let mut events = Events::with_capacity(1024);
///
/// // Connect the stream
/// let stream = TcpStream::connect(&server.local_addr()?)?;
///
/// // Register the stream with `Poll`
/// poll.register(&stream, Token(0))?;
///
/// // Wait for the socket to become ready. This has to happens in a loop to
/// // handle spurious wakeups.
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for Token(t) in &events {
/// if t == 0 {
/// // The socket connected (probably; it could be a spurious wakeup)
/// return Ok(());
/// }
/// }
/// }
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
///
/// # Exclusive access
///
/// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file
/// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued
/// for a given descriptor, not more events will be issued for that descriptor until it has been
/// re-registered using [`reregister`].
pub struct Poll(RawFd);
/// Associates an event with a file descriptor.
///
/// `Token` is a wrapper around `usize`, and is used as an argument to
/// [`Poll::register`] and [`Poll::reregister`].
///
/// See [`Poll`] for more documentation on polling. You will likely want to use something like
/// [`slab`] for creating and managing these.
///
/// [`Poll`]: struct.Poll.html
/// [`Poll::register`]: struct.Poll.html#method.register
/// [`Poll::reregister`]: struct.Poll.html#method.reregister
/// [`slab`]: https://crates.io/crates/slab
pub struct Token(pub usize);
/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received
/// since the last poll. Usually, a single `Events` instance is created at the same time as a
/// [`Poll`] and reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: struct.Poll.html#method.poll
/// [`Poll`]: struct.Poll.html
pub struct Events {
all: Vec<epoll::EpollEvent>,
/// How many of the events in `.all` are filled with responses to the last `poll()`?
current: usize,
}
impl Events {
/// Return a new `Events` capable of holding up to `capacity` events.
pub fn with_capacity(capacity: usize) -> Events {
let mut events = Vec::new();
events.resize(capacity, epoll::EpollEvent::empty());
Events {
all: events,
current: 0,
}
}
}
fn nix_to_io_err(e: nix::Error) -> io::Error {
match e {
nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e),
}
}
impl Poll {
/// Return a new `Poll` handle.
///
/// This function will make a syscall to the operating system to create the system selector. If
/// this syscall fails, `Poll::new` will return with the error.
///
/// See [struct] level docs for more details.
///
/// [struct]: struct.Poll.html
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Poll, Events};
/// use std::time::Duration;
///
/// let poll = match Poll::new() {
/// Ok(poll) => poll,
/// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
/// };
///
/// // Create a structure to receive polled events
/// let mut events = Events::with_capacity(1024);
///
/// // Wait for events, but none will be received because no `Evented`
/// // handles have been registered with this `Poll` instance.
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
/// assert_eq!(n, 0);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn new() -> io::Result<Self> {
epoll::epoll_create1(epoll::EpollCreateFlags::empty())
.map(Poll)
.map_err(nix_to_io_err)
}
fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> {
let mut event = epoll::EpollEvent::new(
epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT,
t.0 as u64,
);
epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err)
}
/// Register a file descriptor with this `Poll` instance.
///
/// Once registered, the `Poll` instance monitors the given descriptor for readiness state
/// changes. When it notices a state change, it will return a readiness event for the handle
/// the next time [`poll`] is called.
///
/// See the [`struct`] docs for a high level overview.
///
/// `token` is user-defined value that is associated with the given `file`. When [`poll`]
/// returns an event for `file`, this token is included. This allows the caller to map the
/// event back to its descriptor. The token associated with a file descriptor can be changed at
/// any time by calling [`reregister`].
pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlAdd)
}
/// Re-register a file descriptor with this `Poll` instance.
///
/// When you re-register a file descriptor, you can change the details of the registration.
/// Specifically, you can update the `token` specified in previous `register` and `reregister`
/// calls.
///
/// See the [`register`] documentation for details about the function
/// arguments and see the [`struct`] docs for a high level overview of
/// polling.
///
/// [`struct`]: #
/// [`register`]: #method.register
pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlMod)
}
/// Deregister a file descriptor from this `Poll` instance.
///
/// When you deregister a file descriptor, it will no longer be modified for readiness events,
/// and it will no longer produce events from `poll`.
pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> {
epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None)
.map_err(nix_to_io_err)
}
/// Wait for events on file descriptors associated with this `Poll` instance.
///
/// Blocks the current thread and waits for events for any of the file descriptors that are
/// registered with this `Poll` instance. The function blocks until either at least one
/// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means
/// that `poll` blocks until a readiness event has been received.
///
/// The supplied `events` will be cleared and newly received readiness events will be pushed
/// onto the end. At most `events.capacity()` events will be returned. If there are further
/// pending readiness events, they are returned on the next call to `poll`.
///
/// Note that once an event has been issued for a given `token` (or rather, for the token's
/// file descriptor), no further events will be issued for that descriptor until it has been
/// re-registered. Note also that the `timeout` is rounded up to the system clock granularity
/// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun
/// by a small amount.
///
/// `poll` returns the number of events that have been pushed into `events`, or `Err` when an
/// error has been encountered with the system selector.
///
/// See the [struct] level documentation for a higher level discussion of polling.
///
/// [struct]: #
pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> {
let timeout = match timeout {
None => -1,
Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize,
};
events.current =
epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?;
Ok(events.current)
}
}
/// [`Events`] iterator.
///
/// This struct is created by the `into_iter` method on [`Events`].
///
/// [`Events`]: struct.Events.html
pub struct EventsIterator<'a> {
events: &'a Events,
at: usize,
}
impl<'a> IntoIterator for &'a Events {
type IntoIter = EventsIterator<'a>;
type Item = Token;
fn into_iter(self) -> Self::IntoIter {
EventsIterator {
events: self,
at: 0,
}
}
}
impl<'a> Iterator for EventsIterator<'a> {
type Item = Token;
fn next(&mut self) -> Option<Self::Item> {
let at = &mut self.at;
if *at >= self.events.current |
self.events.all.get(*at).map(|e| {
*at += 1;
Token(e.data() as usize)
})
}
}
| {
// events beyond .1 are old
return None;
} | conditional_block |
poll.rs | extern crate nix;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::time;
use std::io;
use self::nix::sys::epoll;
/// Polls for readiness events on all registered file descriptors.
///
/// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or
/// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor
/// is considered ready if it is possible to immediately perform a corresponding operation; e.g.
/// [`read`].
///
/// These `Poll` instances are optimized for a worker pool use-case, and so they are all
/// oneshot, edge-triggered, and only support "ready to read".
///
/// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the
/// [`register`] method. A `Token` is also passed to the [`register`] function, and that same
/// `Token` is returned when the given file descriptor is ready.
///
/// [`read`]: tcp/struct.TcpStream.html#method.read
/// [`register`]: #method.register
/// [`reregister`]: #method.reregister
///
/// # Examples
///
/// A basic example -- establishing a `TcpStream` connection.
///
/// ```no_run
/// # extern crate mio;
/// # extern crate mio_pool;
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Events, Poll, Token};
/// use mio::net::TcpStream;
///
/// use std::net::{TcpListener, SocketAddr};
///
/// // Bind a server socket to connect to.
/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
/// let server = TcpListener::bind(&addr)?;
///
/// // Construct a new `Poll` handle as well as the `Events` we'll store into
/// let poll = Poll::new()?;
/// let mut events = Events::with_capacity(1024);
///
/// // Connect the stream
/// let stream = TcpStream::connect(&server.local_addr()?)?;
///
/// // Register the stream with `Poll`
/// poll.register(&stream, Token(0))?;
///
/// // Wait for the socket to become ready. This has to happens in a loop to
/// // handle spurious wakeups.
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for Token(t) in &events {
/// if t == 0 {
/// // The socket connected (probably; it could be a spurious wakeup)
/// return Ok(());
/// }
/// }
/// }
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
///
/// # Exclusive access
///
/// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file
/// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued
/// for a given descriptor, not more events will be issued for that descriptor until it has been
/// re-registered using [`reregister`].
pub struct Poll(RawFd);
/// Associates an event with a file descriptor.
///
/// `Token` is a wrapper around `usize`, and is used as an argument to
/// [`Poll::register`] and [`Poll::reregister`].
///
/// See [`Poll`] for more documentation on polling. You will likely want to use something like
/// [`slab`] for creating and managing these.
///
/// [`Poll`]: struct.Poll.html
/// [`Poll::register`]: struct.Poll.html#method.register
/// [`Poll::reregister`]: struct.Poll.html#method.reregister
/// [`slab`]: https://crates.io/crates/slab
pub struct Token(pub usize);
/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received
/// since the last poll. Usually, a single `Events` instance is created at the same time as a
/// [`Poll`] and reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: struct.Poll.html#method.poll
/// [`Poll`]: struct.Poll.html
pub struct Events {
all: Vec<epoll::EpollEvent>,
/// How many of the events in `.all` are filled with responses to the last `poll()`?
current: usize,
}
impl Events {
/// Return a new `Events` capable of holding up to `capacity` events.
pub fn with_capacity(capacity: usize) -> Events {
let mut events = Vec::new();
events.resize(capacity, epoll::EpollEvent::empty());
Events {
all: events,
current: 0,
}
}
}
fn nix_to_io_err(e: nix::Error) -> io::Error {
match e {
nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e),
}
}
impl Poll {
/// Return a new `Poll` handle.
///
/// This function will make a syscall to the operating system to create the system selector. If
/// this syscall fails, `Poll::new` will return with the error.
///
/// See [struct] level docs for more details.
///
/// [struct]: struct.Poll.html
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Poll, Events};
/// use std::time::Duration;
///
/// let poll = match Poll::new() {
/// Ok(poll) => poll,
/// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
/// };
///
/// // Create a structure to receive polled events
/// let mut events = Events::with_capacity(1024);
///
/// // Wait for events, but none will be received because no `Evented`
/// // handles have been registered with this `Poll` instance.
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
/// assert_eq!(n, 0);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn new() -> io::Result<Self> {
epoll::epoll_create1(epoll::EpollCreateFlags::empty())
.map(Poll)
.map_err(nix_to_io_err)
}
fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> {
let mut event = epoll::EpollEvent::new(
epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT,
t.0 as u64,
);
epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err)
}
/// Register a file descriptor with this `Poll` instance.
///
/// Once registered, the `Poll` instance monitors the given descriptor for readiness state
/// changes. When it notices a state change, it will return a readiness event for the handle
/// the next time [`poll`] is called.
///
/// See the [`struct`] docs for a high level overview.
///
/// `token` is user-defined value that is associated with the given `file`. When [`poll`]
/// returns an event for `file`, this token is included. This allows the caller to map the
/// event back to its descriptor. The token associated with a file descriptor can be changed at
/// any time by calling [`reregister`].
pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlAdd)
}
/// Re-register a file descriptor with this `Poll` instance.
///
/// When you re-register a file descriptor, you can change the details of the registration.
/// Specifically, you can update the `token` specified in previous `register` and `reregister`
/// calls.
///
/// See the [`register`] documentation for details about the function
/// arguments and see the [`struct`] docs for a high level overview of
/// polling.
///
/// [`struct`]: #
/// [`register`]: #method.register
pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlMod)
}
/// Deregister a file descriptor from this `Poll` instance.
///
/// When you deregister a file descriptor, it will no longer be modified for readiness events,
/// and it will no longer produce events from `poll`.
pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> {
epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None)
.map_err(nix_to_io_err)
}
/// Wait for events on file descriptors associated with this `Poll` instance.
///
/// Blocks the current thread and waits for events for any of the file descriptors that are
/// registered with this `Poll` instance. The function blocks until either at least one
/// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means
/// that `poll` blocks until a readiness event has been received.
///
/// The supplied `events` will be cleared and newly received readiness events will be pushed
/// onto the end. At most `events.capacity()` events will be returned. If there are further
/// pending readiness events, they are returned on the next call to `poll`.
///
/// Note that once an event has been issued for a given `token` (or rather, for the token's
/// file descriptor), no further events will be issued for that descriptor until it has been
/// re-registered. Note also that the `timeout` is rounded up to the system clock granularity
/// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun
/// by a small amount.
///
/// `poll` returns the number of events that have been pushed into `events`, or `Err` when an
/// error has been encountered with the system selector.
///
/// See the [struct] level documentation for a higher level discussion of polling.
///
/// [struct]: #
pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> {
let timeout = match timeout {
None => -1,
Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize,
};
events.current =
epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?;
Ok(events.current)
}
}
/// [`Events`] iterator.
///
/// This struct is created by the `into_iter` method on [`Events`].
///
/// [`Events`]: struct.Events.html
pub struct | <'a> {
events: &'a Events,
at: usize,
}
impl<'a> IntoIterator for &'a Events {
type IntoIter = EventsIterator<'a>;
type Item = Token;
fn into_iter(self) -> Self::IntoIter {
EventsIterator {
events: self,
at: 0,
}
}
}
impl<'a> Iterator for EventsIterator<'a> {
type Item = Token;
fn next(&mut self) -> Option<Self::Item> {
let at = &mut self.at;
if *at >= self.events.current {
// events beyond.1 are old
return None;
}
self.events.all.get(*at).map(|e| {
*at += 1;
Token(e.data() as usize)
})
}
}
| EventsIterator | identifier_name |
poll.rs | extern crate nix;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::time;
use std::io;
use self::nix::sys::epoll;
/// Polls for readiness events on all registered file descriptors.
///
/// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or
/// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor
/// is considered ready if it is possible to immediately perform a corresponding operation; e.g.
/// [`read`].
///
/// These `Poll` instances are optimized for a worker pool use-case, and so they are all
/// oneshot, edge-triggered, and only support "ready to read".
///
/// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the
/// [`register`] method. A `Token` is also passed to the [`register`] function, and that same
/// `Token` is returned when the given file descriptor is ready.
///
/// [`read`]: tcp/struct.TcpStream.html#method.read
/// [`register`]: #method.register
/// [`reregister`]: #method.reregister
///
/// # Examples
///
/// A basic example -- establishing a `TcpStream` connection.
///
/// ```no_run
/// # extern crate mio;
/// # extern crate mio_pool;
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Events, Poll, Token};
/// use mio::net::TcpStream;
///
/// use std::net::{TcpListener, SocketAddr};
///
/// // Bind a server socket to connect to.
/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
/// let server = TcpListener::bind(&addr)?;
///
/// // Construct a new `Poll` handle as well as the `Events` we'll store into
/// let poll = Poll::new()?;
/// let mut events = Events::with_capacity(1024);
///
/// // Connect the stream
/// let stream = TcpStream::connect(&server.local_addr()?)?;
///
/// // Register the stream with `Poll`
/// poll.register(&stream, Token(0))?;
///
/// // Wait for the socket to become ready. This has to happen in a loop to
/// // handle spurious wakeups.
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for Token(t) in &events {
/// if t == 0 {
/// // The socket connected (probably; it could be a spurious wakeup)
/// return Ok(());
/// }
/// }
/// }
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
///
/// # Exclusive access
///
/// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file
/// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued
/// for a given descriptor, no more events will be issued for that descriptor until it has been
/// re-registered using [`reregister`].
pub struct Poll(RawFd);
/// Associates an event with a file descriptor.
///
/// `Token` is a wrapper around `usize`, and is used as an argument to
/// [`Poll::register`] and [`Poll::reregister`].
///
/// See [`Poll`] for more documentation on polling. You will likely want to use something like
/// [`slab`] for creating and managing these.
///
/// [`Poll`]: struct.Poll.html
/// [`Poll::register`]: struct.Poll.html#method.register
/// [`Poll::reregister`]: struct.Poll.html#method.reregister
/// [`slab`]: https://crates.io/crates/slab
pub struct Token(pub usize);
/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received
/// since the last poll. Usually, a single `Events` instance is created at the same time as a
/// [`Poll`] and reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: struct.Poll.html#method.poll
/// [`Poll`]: struct.Poll.html
pub struct Events {
all: Vec<epoll::EpollEvent>,
/// How many of the events in `.all` are filled with responses to the last `poll()`?
current: usize,
}
impl Events {
/// Return a new `Events` capable of holding up to `capacity` events.
pub fn with_capacity(capacity: usize) -> Events {
let mut events = Vec::new();
events.resize(capacity, epoll::EpollEvent::empty());
Events {
all: events,
current: 0,
}
}
}
fn nix_to_io_err(e: nix::Error) -> io::Error {
match e {
nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e),
nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e),
}
}
impl Poll {
/// Return a new `Poll` handle.
///
/// This function will make a syscall to the operating system to create the system selector. If
/// this syscall fails, `Poll::new` will return with the error.
///
/// See [struct] level docs for more details.
///
/// [struct]: struct.Poll.html
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// # fn try_main() -> Result<(), Box<Error>> {
/// use mio_pool::poll::{Poll, Events};
/// use std::time::Duration;
///
/// let poll = match Poll::new() {
/// Ok(poll) => poll,
/// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
/// };
///
/// // Create a structure to receive polled events
/// let mut events = Events::with_capacity(1024);
///
/// // Wait for events, but none will be received because no `Evented`
/// // handles have been registered with this `Poll` instance.
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
/// assert_eq!(n, 0);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn new() -> io::Result<Self> {
epoll::epoll_create1(epoll::EpollCreateFlags::empty())
.map(Poll)
.map_err(nix_to_io_err)
}
fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> {
let mut event = epoll::EpollEvent::new(
epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT,
t.0 as u64,
);
epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err)
}
/// Register a file descriptor with this `Poll` instance.
///
/// Once registered, the `Poll` instance monitors the given descriptor for readiness state
/// changes. When it notices a state change, it will return a readiness event for the handle
/// the next time [`poll`] is called.
///
/// See the [`struct`] docs for a high level overview.
///
/// `token` is a user-defined value that is associated with the given `file`. When [`poll`]
/// returns an event for `file`, this token is included. This allows the caller to map the
/// event back to its descriptor. The token associated with a file descriptor can be changed at
/// any time by calling [`reregister`].
pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlAdd)
}
/// Re-register a file descriptor with this `Poll` instance.
///
/// When you re-register a file descriptor, you can change the details of the registration.
/// Specifically, you can update the `token` specified in previous `register` and `reregister`
/// calls.
///
/// See the [`register`] documentation for details about the function
/// arguments and see the [`struct`] docs for a high level overview of
/// polling.
///
/// [`struct`]: #
/// [`register`]: #method.register
pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
self.ctl(file, t, epoll::EpollOp::EpollCtlMod)
}
/// Deregister a file descriptor from this `Poll` instance.
///
/// When you deregister a file descriptor, it will no longer be monitored for readiness events,
/// and it will no longer produce events from `poll`.
pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> {
epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None)
.map_err(nix_to_io_err)
}
/// Wait for events on file descriptors associated with this `Poll` instance.
///
/// Blocks the current thread and waits for events for any of the file descriptors that are
/// registered with this `Poll` instance. The function blocks until either at least one
/// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means
/// that `poll` blocks until a readiness event has been received.
///
/// The supplied `events` will be cleared and newly received readiness events will be pushed
/// onto the end. At most `events.capacity()` events will be returned. If there are further
/// pending readiness events, they are returned on the next call to `poll`.
///
/// Note that once an event has been issued for a given `token` (or rather, for the token's
/// file descriptor), no further events will be issued for that descriptor until it has been
/// re-registered. Note also that the `timeout` is rounded up to the system clock granularity
/// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun
/// by a small amount.
///
/// `poll` returns the number of events that have been pushed into `events`, or `Err` when an
/// error has been encountered with the system selector.
///
/// See the [struct] level documentation for a higher level discussion of polling.
///
/// [struct]: #
pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> {
let timeout = match timeout {
None => -1,
Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize,
};
events.current =
epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?;
Ok(events.current)
}
}
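/// A minimal worker-loop sketch (illustrative only, not part of the original
/// API): with `EPOLL_ONESHOT`, every descriptor that fires must be
/// re-registered before it will produce another event. The `streams` slice and
/// the `handle` callback are assumptions made up for this example.
pub fn oneshot_worker_sketch(
    poll: &Poll,
    streams: &[std::net::TcpStream],
    handle: impl Fn(usize),
) -> io::Result<()> {
    let mut events = Events::with_capacity(streams.len());
    loop {
        // a `None` timeout would block until at least one event arrives
        poll.poll(&mut events, Some(time::Duration::from_millis(500)))?;
        for Token(t) in &events {
            handle(t);
            // re-arm the oneshot registration, or this fd stays silent forever
            poll.reregister(&streams[t], Token(t))?;
        }
    }
}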
/// [`Events`] iterator.
///
/// This struct is created by the `into_iter` method on [`Events`].
///
/// [`Events`]: struct.Events.html
pub struct EventsIterator<'a> {
events: &'a Events,
at: usize,
}
impl<'a> IntoIterator for &'a Events {
type IntoIter = EventsIterator<'a>;
type Item = Token;
fn into_iter(self) -> Self::IntoIter {
EventsIterator {
events: self,
at: 0,
}
}
}
impl<'a> Iterator for EventsIterator<'a> {
type Item = Token;
fn next(&mut self) -> Option<Self::Item> |
}
| {
let at = &mut self.at;
if *at >= self.events.current {
// events beyond .current are old
return None;
}
self.events.all.get(*at).map(|e| {
*at += 1;
Token(e.data() as usize)
})
} | identifier_body |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible, which removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap()
.to_string()
)
}
}
impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
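// Illustrative round-trip (not in the original file): parse the disk GUID from
// the module docs above and format it back; `FromStr` and `Display` agree on
// the lowercase hyphenated form produced by the uuid crate.
#[test]
fn guid_roundtrip_sketch() {
    use std::str::FromStr;
    let s = "eab49a2f-efea-45e6-9a1b-61fece3426dd";
    let guid = GptGuid::from_str(s).unwrap();
    assert_eq!(guid.to_string(), s);
}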
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up until version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
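// Worked example (illustrative): for the 1 GiB disk in the module docs,
// blk_size = 512 and num_blocks = 2097152, so GPTHeader::new computes
// lba_start = (1 << 20) / 512 = 2048 and
// lba_end = (2097152 - 1) - (1 << 14) / 512 - 1 = 2097151 - 32 - 1 = 2097118,
// matching sgdisk's "first usable sector is 2048, last usable sector is 2097118".
#[test]
fn usable_range_sketch() {
    let hdr = GPTHeader::new(512, 2_097_152, uuid::Uuid::new_v4());
    assert_eq!(hdr.lba_start, 2048);
    assert_eq!(hdr.lba_end, 2_097_118);
}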
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes; according to the docs, bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry; do not confuse this with fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0..parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is a standard GPT label (such that you can use it without us
/// in the data path). The only thing that is really specific to us is the
/// ent_type GUID: if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption, but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
impl Display for NexusLabel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0..self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we can't use serialize_tuple_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
le | }
#[derive
(Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
}
| t mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
} | identifier_body |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible, which removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap() | impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up til version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes; according to the docs, bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry; do not confuse this with fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0..parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
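// Sketch (not in the original file): whenever an entry changes, the header's
// table_crc must be recomputed from the partition array, otherwise the label
// will be rejected on the next read.
fn refresh_table_crc_sketch(header: &mut GPTHeader, parts: &[GptEntry]) {
    header.table_crc = GptEntry::checksum(parts);
}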
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is a standard GPT label (such that you can use it without us
/// in the data path). The only thing that is really specific to us is the
/// ent_type GUID: if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption, but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
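// Illustrative check (not in the original file): with the layout from the
// module docs the data partition (index 1) runs from sector 10240 to 2097118,
// so offset() is 10240 and num_blocks() is 2086878. The header and entry
// values are fabricated just to exercise the two accessors.
#[test]
fn label_offsets_sketch() {
    let meta = GptEntry { ent_start: 2048, ent_end: 10239, ..Default::default() };
    let data = GptEntry { ent_start: 10240, ent_end: 2_097_118, ..Default::default() };
    let label = NexusLabel { primary: GPTHeader::default(), partitions: vec![meta, data] };
    assert_eq!(label.offset(), 10240);
    assert_eq!(label.num_blocks(), 2_086_878);
}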
impl Display for NexusLabel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0..self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we can't use serialize_tuple_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
let mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
}
}
#[derive(Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
} | .to_string()
)
}
}
| random_line_split |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible, which removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap()
.to_string()
)
}
}
impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up until version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes; according to the docs, bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry; do not confuse this with fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0..parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
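// Sketch (illustrative): reading a partition array out of a raw table buffer
// and validating it against the CRC stored in the header; `hdr` and `buf` are
// assumptions made up for this example.
fn read_and_verify_sketch(hdr: &GPTHeader, buf: &[u8]) -> Result<Vec<GptEntry>, Error> {
    let parts = GptEntry::from_slice(buf, hdr.num_entries)?;
    if GptEntry::checksum(&parts) != hdr.table_crc {
        return Err(Error::Invalid);
    }
    Ok(parts)
}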
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is a standard GPT label (such that you can use it without us
/// in the data path). The only thing that is really specific to us is the
/// ent_type GUID: if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption, but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
impl Display for NexusLabel {
fn fmt(&self, f | mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0..self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we cant use serialize_type_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
let mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
}
}
#[derive(Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
}
| : & | identifier_name |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
}
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Why do I have a loop here? I forgot??
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
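// Sketch (illustrative): the wire format produced above is a fixed 12-byte
// header (u16 id, i64 timestamp, u16 body length, all big-endian) followed by
// the UTF-8 body; SprinklerProto parses the same layout back on the master side.
#[test]
fn compose_message_layout_sketch() {
    let buf = compose_message(7, String::from("hi"));
    assert_eq!(buf.len(), 12 + 2); // header + 2-byte body
    assert_eq!(BigEndian::read_u16(&buf[0..2]), 7); // sprinkler id
    assert_eq!(BigEndian::read_u16(&buf[10..12]), 2); // body length
}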
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender | pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads, identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on an intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn diminish(&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
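// Sketch (illustrative): a typical anomaly lifecycle driven by the transitions
// above -- detection, a bounded number of fix attempts, then giving up.
#[test]
fn anomaly_lifecycle_sketch() {
    let max_retry = 2;
    let mut state = Anomaly::Negative;
    // Negative -> Positive -> Fixing(1) -> Fixing(2) -> OutOfControl
    for _ in 0..4 {
        let transition = state.escalate(max_retry);
        state >>= transition;
    }
    assert_eq!(state, Anomaly::OutOfControl);
}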
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
// TODO key loading is hard coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`, this ensures we handle the
// error. We do this by printing the error to STDOUT.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
/// Activates sprinkler agents based on hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
} | Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender | random_line_split |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T |
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Why do I have a loop here? I forgot??
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender
Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender
pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
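// Sketch (illustrative): from a master thread, the Transmitter wrapper hides
// which kind of channel backs the monitor. Note the Asynchronous variant spawns
// onto tokio, so it must be used from within a runtime.
fn notify_sketch(tx: &Transmitter<Message>) {
    let msg = Message {
        timestamp: chrono::Local::now().naive_local(),
        body: String::from("anomaly detected"),
    };
    if tx.send(msg).is_err() {
        warn!("monitor thread is gone");
    }
}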
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads, identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on an intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn diminish(&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
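// A sketch (test module added for exposition; max_retry of 2 is an
// illustrative assumption) walking the escalation path Negative -> Positive
// -> Fixing(1) -> Fixing(2) -> OutOfControl via the operators defined above.
#[cfg(test)]
mod anomaly_escalation_sketch {
    use super::*;

    #[test]
    fn escalates_then_recovers() {
        let mut state = Anomaly::Negative;
        let t = state.escalate(2);
        assert_eq!(t, AnomalyTransition::Occurred);
        state >>= t; // Negative -> Positive
        let t = state.escalate(2);
        state >>= t; // Positive -> Fixing(1)
        let t = state.escalate(2);
        state >>= t; // Fixing(1) -> Fixing(2)
        let t = state.escalate(2); // n == max_retry, so the strategy gives up
        assert_eq!(t, AnomalyTransition::GaveUp);
        state >>= t;
        assert_eq!(state, Anomaly::OutOfControl);
        let t = state.diminish(); // the anomaly clears externally
        state >>= t;
        assert_eq!(state, Anomaly::Negative);
    }
}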
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
// TODO key loading is hard coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`; this ensures we handle the
// error, which we do here by logging it before spawning the task.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
/// Activates sprinkler agents based on the hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
}
| {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
} | identifier_body |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
}
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Keep reading until EOF (n == 0) or until the socket would block (try_ready! returns NotReady early).
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender
Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender
pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
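// A sketch (test module and values added for exposition) of what Transmitter
// buys callers: the same send() call works for either channel flavor. The
// synchronous arm is shown here because it needs no tokio runtime.
#[cfg(test)]
mod transmitter_sketch {
    #[test]
    fn synchronous_send_delivers() {
        let (tx, rx) = std::sync::mpsc::channel();
        let tx = super::Transmitter::Synchronous(tx);
        tx.send(42u32).expect("send should succeed while rx is alive");
        assert_eq!(rx.recv().unwrap(), 42);
    }
}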
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads and are identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on an intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn | (&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
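// A sketch (test module added for exposition): the pairwise Shr impl doubles
// as a validity check on edges of the state machine, returning None for
// transitions the escalate/diminish logic never takes.
#[cfg(test)]
mod transition_edge_sketch {
    use super::*;

    #[test]
    fn undefined_edges_are_none() {
        assert_eq!(Anomaly::Negative >> Anomaly::Positive, Some(AnomalyTransition::Occurred));
        assert_eq!(Anomaly::Negative >> Anomaly::OutOfControl, None);
        assert_eq!(Anomaly::Fixing(1) >> Anomaly::Fixing(3), None); // retries must step by one
    }
}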
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
// TODO key loading is hard coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`; this ensures we handle the
// error, which we do here by logging it before spawning the task.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
/// Activates sprinkler agents based on the hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
}
| diminish | identifier_name |
lib.rs | bail, Result};
use crossbeam::channel::Sender;
use glob::glob;
use log::{info, warn};
use scan_fmt::scan_fmt;
use simplelog as sl;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::fmt::Write as FmtWrite;
use std::fs;
use std::io::prelude::*;
use std::io::BufReader;
use std::mem::size_of;
use std::os::linux::fs::MetadataExt as LinuxME;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::MetadataExt as UnixME;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::sync::{atomic, Condvar, Mutex};
use std::thread_local;
use std::time::{Duration, UNIX_EPOCH};
use sysinfo::{self, SystemExt};
pub mod iocost;
pub mod journal_tailer;
pub mod json_file;
pub mod storage_info;
pub mod systemd;
pub use iocost::{IoCostModelParams, IoCostQoSParams, IoCostSysSave};
pub use journal_tailer::*;
pub use json_file::{
JsonArgs, JsonArgsHelper, JsonConfigFile, JsonLoad, JsonRawFile, JsonReportFile, JsonSave,
};
pub use storage_info::*;
pub use systemd::TransientService;
pub const TO_MSEC: f64 = 1000.0;
pub const TO_PCT: f64 = 100.0;
pub const MSEC: f64 = 1.0 / 1000.0;
lazy_static::lazy_static! {
pub static ref TOTAL_SYSTEM_MEMORY: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_memory() as usize * 1024
};
pub static ref TOTAL_SYSTEM_SWAP: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_swap() as usize * 1024
};
pub static ref NR_SYSTEM_CPUS: usize = ::num_cpus::get();
static ref TOTAL_MEMORY: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref TOTAL_SWAP: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref NR_CPUS: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub static ref PAGE_SIZE: usize = ::page_size::get();
pub static ref ROTATIONAL_SWAP: bool = storage_info::is_swap_rotational();
pub static ref IS_FB_PROD: bool = {
match glob("/sys/fs/cgroup/**/fbagentd.service")
.unwrap()
.filter_map(|x| x.ok())
.next()
{
Some(_) => {
warn!("FB PROD detected, default parameters will be adjusted");
true
}
None => false,
}
};
}
pub fn total_memory() -> usize {
match TOTAL_MEMORY.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_MEMORY,
v => v,
}
}
pub fn total_swap() -> usize {
match TOTAL_SWAP.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_SWAP,
v => v,
}
}
pub fn nr_cpus() -> usize {
match NR_CPUS.load(atomic::Ordering::Relaxed) {
0 => *NR_SYSTEM_CPUS,
v => v,
}
}
pub fn override_system_configuration(
total_memory: Option<usize>,
total_swap: Option<usize>,
nr_cpus: Option<usize>,
) {
let total_memory = total_memory.unwrap_or(0);
let total_swap = total_swap.unwrap_or(0);
let nr_cpus = nr_cpus.unwrap_or(0);
TOTAL_MEMORY.store(total_memory, atomic::Ordering::Relaxed);
TOTAL_SWAP.store(total_swap, atomic::Ordering::Relaxed);
NR_CPUS.store(nr_cpus, atomic::Ordering::Relaxed);
let mut buf = String::new();
if total_memory > 0 {
write!(
buf,
" memory={}->{}",
format_size(*TOTAL_SYSTEM_MEMORY),
format_size(total_memory)
)
.unwrap();
}
if total_swap > 0 {
write!(
buf,
" swap={}->{}",
format_size(*TOTAL_SYSTEM_SWAP),
format_size(total_swap)
)
.unwrap();
}
if nr_cpus > 0 {
write!(buf, " cpus={}->{}", *NR_SYSTEM_CPUS, nr_cpus).unwrap();
}
if buf.len() > 0 {
info!("System configuration overrides:{}", &buf);
}
}
pub fn to_gb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 30) as f64
}
pub fn to_mb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 20) as f64
}
pub fn to_kb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 10) as f64
}
pub fn scale_ratio<T>(ratio: f64, (left, mid, right): (T, T, T)) -> T
where
T: PartialOrd + num::FromPrimitive + num::ToPrimitive,
{
let (left_f64, mid_f64, right_f64) = (
left.to_f64().unwrap(),
mid.to_f64().unwrap(),
right.to_f64().unwrap(),
);
let v = if ratio < 0.5 {
left_f64 + (mid_f64 - left_f64) * ratio / 0.5
} else {
mid_f64 + (right_f64 - mid_f64) * (ratio - 0.5) / 0.5
};
num::clamp(T::from_f64(v).unwrap(), left, right)
}
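// A sketch (test module and numbers added for exposition) of scale_ratio's
// two-segment interpolation: ratios in [0, 0.5] map left..mid, ratios in
// [0.5, 1.0] map mid..right, and the result is clamped to [left, right].
#[cfg(test)]
mod scale_ratio_sketch {
    #[test]
    fn two_segment_interpolation() {
        assert_eq!(super::scale_ratio(0.25, (0u64, 10, 100)), 5);
        assert_eq!(super::scale_ratio(0.5, (0u64, 10, 100)), 10);
        assert_eq!(super::scale_ratio(0.75, (0u64, 10, 100)), 55);
    }
}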
fn format_size_internal<T>(size: T, zero: &str) -> String
where
T: num::ToPrimitive,
{
let format_size_helper = |size: u64, shift: u32, suffix: &str| -> Option<String> {
let unit: u64 = 1 << shift;
if size < unit {
Some(zero.to_string())
} else if size < 100 * unit {
Some(format!("{:.1}{}", size as f64 / unit as f64, suffix))
} else if size < 1024 * unit {
Some(format!("{:}{}", size / unit, suffix))
} else {
None
}
};
let size = size.to_u64().unwrap();
format_size_helper(size, 0, "B")
.or_else(|| format_size_helper(size, 10, "K"))
.or_else(|| format_size_helper(size, 20, "M"))
.or_else(|| format_size_helper(size, 30, "G"))
.or_else(|| format_size_helper(size, 40, "P"))
.or_else(|| format_size_helper(size, 50, "E"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_size<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "0")
}
pub fn format_size_dashed<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "-")
}
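// A sketch (test module and sizes added for exposition) of the three bands
// format_size_internal cycles through per unit: the zero marker below one
// unit, one decimal place below 100 units, and integers up to 1024 units.
#[cfg(test)]
mod format_size_sketch {
    #[test]
    fn unit_bands() {
        assert_eq!(super::format_size(0u64), "0");
        assert_eq!(super::format_size_dashed(0u64), "-");
        assert_eq!(super::format_size(512u64), "512B");
        assert_eq!(super::format_size(1536u64), "1.5K");
    }
}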
fn format_duration_internal(dur: f64, zero: &str) -> String {
let format_nsecs_helper = |nsecs: u64, unit: u64, max: u64, suffix: &str| -> Option<String> {
if nsecs < unit {
Some(zero.to_string())
} else if nsecs < 100 * unit {
Some(format!("{:.1}{}", nsecs as f64 / unit as f64, suffix))
} else if nsecs < max * unit {
Some(format!("{:}{}", nsecs / unit, suffix))
} else {
None
}
};
let nsecs = (dur * 1_000_000_000.0).round() as u64;
format_nsecs_helper(nsecs, 10_u64.pow(0), 1000, "n")
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(3), 1000, "u"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(6), 1000, "m"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9), 60, "s"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60, 60, "M"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60, 24, "H"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24, 365, "D"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24 * 365, 1000, "Y"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_duration(dur: f64) -> String {
format_duration_internal(dur, "0")
}
pub fn format_duration_dashed(dur: f64) -> String {
format_duration_internal(dur, "-")
}
fn format_pct_internal(ratio: f64, zero: &str) -> String {
if ratio == 0.0 {
zero.to_string()
} else if ratio > 0.99 && ratio <= 9.99 {
format!("{:3.0}", ratio * 100.0)
} else if ratio > 9.99 {
"INF".into()
} else {
format!("{:.01}", ratio * 100.0)
}
}
pub fn format_pct(ratio: f64) -> String {
format_pct_internal(ratio, "0")
}
pub fn format_pct_dashed(ratio: f64) -> String {
format_pct_internal(ratio, "-")
}
pub fn parse_duration(input: &str) -> Result<f64> {
lazy_static::lazy_static! {
static ref UNITS: HashMap<char, f64> = [
('n', 0.000_000_001),
('u', 0.000_001),
('m', 0.001),
('s', 1.0),
('M', 60.0),
('H', 3600.0),
('D', 3600.0 * 24.0),
('Y', 3600.0 * 24.0 * 365.0),
]
.iter()
.cloned()
.collect();
}
let mut num = String::new();
let mut sum = 0.0;
for ch in input.chars() {
if UNITS.contains_key(&ch) {
sum += num.trim().parse::<f64>()? * UNITS[&ch];
num.clear();
} else {
num.push(ch);
}
}
if num.trim().len() > 0 {
sum += num.trim().parse::<f64>()?;
}
Ok(sum)
}
fn is_executable<P: AsRef<Path>>(path_in: P) -> bool {
let path = path_in.as_ref();
match path.metadata() {
Ok(md) => md.is_file() && md.mode() & 0o111 != 0,
Err(_) => false,
}
}
pub fn exe_dir() -> Result<PathBuf> {
let mut path = env::current_exe()?;
path.pop();
Ok(path)
}
pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>(
name_in: N,
prepend_in: Option<P>,
) -> Option<PathBuf> {
let name = name_in.as_ref();
let mut search = OsString::new();
if let Some(prepend) = prepend_in.as_ref() {
search.push(prepend);
search.push(":");
}
if let Some(dirs) = env::var_os("PATH") {
search.push(dirs);
}
for dir in env::split_paths(&search) { | let mut path = dir.to_owned();
path.push(name);
if let Ok(path) = path.canonicalize() {
if is_executable(&path) {
return Some(path);
}
}
}
None
}
pub fn chgrp<P: AsRef<Path>>(path_in: P, gid: u32) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
if md.st_gid() != gid {
let cpath = CString::new(path.as_os_str().as_bytes())?;
if unsafe { libc::chown(cpath.as_ptr(), md.st_uid(), gid) } < 0 {
bail!("Failed to chgrp {:?} to {} ({:?})", path, gid, unsafe {
*libc::__errno_location()
});
}
Ok(true)
} else {
Ok(false)
}
}
pub fn set_sgid<P: AsRef<Path>>(path_in: P) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
let mut perm = md.permissions();
if perm.mode() & 0o2000 == 0 {
perm.set_mode(perm.mode() | 0o2000);
fs::set_permissions(path, perm)?;
Ok(true)
} else {
Ok(false)
}
}
pub fn read_one_line<P: AsRef<Path>>(path: P) -> Result<String> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
Ok(r.lines().next().ok_or(anyhow!("File empty"))??)
}
pub fn write_one_line<P: AsRef<Path>>(path: P, line: &str) -> Result<()> {
let mut f = fs::OpenOptions::new().write(true).open(path)?;
Ok(f.write_all(line.as_ref())?)
}
pub fn unix_now() -> u64 {
UNIX_EPOCH.elapsed().unwrap().as_secs()
}
pub fn init_logging(verbosity: u32) {
if std::env::var("RUST_LOG").is_ok() {
env_logger::init();
} else {
let sl_level = match verbosity {
0 | 1 => sl::LevelFilter::Info,
2 => sl::LevelFilter::Debug,
_ => sl::LevelFilter::Trace,
};
let mut lcfg = sl::ConfigBuilder::new();
lcfg.set_time_level(sl::LevelFilter::Off)
.set_location_level(sl::LevelFilter::Off)
.set_target_level(sl::LevelFilter::Off)
.set_thread_level(sl::LevelFilter::Off);
if !console::user_attended_stderr()
|| sl::TermLogger::init(sl_level, lcfg.build(), sl::TerminalMode::Stderr).is_err()
{
sl::SimpleLogger::init(sl_level, lcfg.build()).unwrap();
}
}
}
pub fn child_reader_thread(name: String, stdout: process::ChildStdout, tx: Sender<String>) {
let reader = BufReader::new(stdout);
for line in reader.lines() {
match line {
Ok(line) => {
if let Err(e) = tx.send(line) {
info!("{}: Reader thread terminating ({:?})", &name, &e);
break;
}
}
Err(e) => {
warn!("{}: Failed to read from journalctl ({:?})", &name, &e);
break;
}
}
}
}
pub fn run_command(cmd: &mut Command, emsg: &str) -> Result<()> {
let cmd_str = format!("{:?}", &cmd);
match cmd.status() {
Ok(rc) if rc.success() => Ok(()),
Ok(rc) => bail!("{:?} ({:?}): {}", &cmd_str, &rc, emsg,),
Err(e) => bail!("{:?} ({:?}): {}", &cmd_str, &e, emsg,),
}
}
pub fn fill_area_with_random<T, R: rand::Rng + ?Sized>(area: &mut [T], comp: f64, rng: &mut R) {
let area = unsafe {
std::slice::from_raw_parts_mut(
std::mem::transmute::<*mut T, *mut u64>(area.as_mut_ptr()),
area.len() * size_of::<T>() / size_of::<u64>(),
)
};
const BLOCK_SIZE: usize = 512;
const WORDS_PER_BLOCK: usize = BLOCK_SIZE / size_of::<u64>();
let rands_per_block = (((WORDS_PER_BLOCK as f64) * (1.0 - comp)) as usize).min(WORDS_PER_BLOCK);
let last_first = area[0];
for i in 0..area.len() {
area[i] = if i % WORDS_PER_BLOCK < rands_per_block {
rng.gen()
} else {
0
};
}
// guarantee that the first word doesn't stay the same
if area[0] == last_first {
area[0] += 1;
}
}
pub fn read_cgroup_flat_keyed_file(path: &str) -> Result<HashMap<String, u64>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
if let Ok((key, val)) = scan_fmt!(&line, "{} {d}", String, u64) {
map.insert(key, val);
}
}
Ok(map)
}
pub fn read_cgroup_nested_keyed_file(
path: &str,
) -> Result<HashMap<String, HashMap<String, String>>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut top_map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
let mut split = line.split_whitespace();
let top_key = split.next().unwrap();
let mut map = HashMap::new();
for tok in split {
if let Ok((key, val)) = scan_fmt!(tok, "{}={}", String, String) {
map.insert(key, val);
}
}
top_map.insert(top_key.into(), map);
}
Ok(top_map)
}
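// A sketch (test module, file name, and contents added for exposition) of the
// flat keyed format this parser expects -- the "key value" shape of files
// like cgroup2's memory.stat.
#[cfg(test)]
mod cgroup_parse_sketch {
    use std::io::Write;

    #[test]
    fn parses_flat_keyed_lines() {
        let path = std::env::temp_dir().join("rd_util_flat_keyed_sketch");
        let mut f = std::fs::File::create(&path).unwrap();
        writeln!(f, "usage_usec 100").unwrap();
        writeln!(f, "user_usec 60").unwrap();
        let map = super::read_cgroup_flat_keyed_file(path.to_str().unwrap()).unwrap();
        assert_eq!(map["usage_usec"], 100);
        assert_eq!(map["user_usec"], 60);
    }
}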
struct GlobalProgState {
exiting: bool,
kick_seq: u64,
}
lazy_static::lazy_static! {
static ref PROG_STATE: Mutex<GlobalProgState> = Mutex::new(GlobalProgState {
exiting: false,
kick_seq: 1
});
static ref PROG_WAITQ: Condvar = Condvar::new();
}
thread_local! {
static LOCAL_KICK_SEQ: RefCell<u64> = RefCell::new(0);
}
pub fn setup_prog_state() {
ctrlc::set_handler(move || {
info!("SIGINT/TERM received, exiting...");
set_prog_exiting();
})
.expect("Error setting term handler");
}
pub fn set_prog_exiting() {
PROG_STATE.lock().unwrap().exiting = true;
PROG_WAITQ.notify_all();
}
pub fn prog_exiting() -> bool {
PROG_STATE.lock().unwrap().exiting
}
pub fn prog_kick() {
PROG_STATE.lock().unwrap().kick_seq += 1;
PROG_WAITQ.notify_all();
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProgState {
Running,
Exiting,
Kicked,
}
pub fn wait_prog_state(dur: Duration) -> ProgState {
let mut first = true;
let mut state = PROG_STATE.lock().unwrap();
loop {
if state.exiting {
return ProgState::Exiting;
}
if LOCAL_KICK_SEQ.with(|seq| {
if *seq.borrow() < state.kick_seq {
*seq.borrow_mut() = state.kick_seq;
true
} else {
false
}
}) {
return ProgState::Kicked;
}
if first {
state = PROG_WAITQ.wait_timeout(state, dur).unwrap().0;
first = false;
} else {
return ProgState::Running;
}
}
}
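// A usage sketch (test module added for exposition) of the program-state
// helpers above. kick_seq starts at 1, so each thread observes one initial
// kick; after consuming it, a quiet wait times out into Running.
#[cfg(test)]
mod prog_state_sketch {
    use std::time::Duration;

    #[test]
    fn initial_kick_then_running() {
        assert_eq!(super::wait_prog_state(Duration::from_millis(1)), super::ProgState::Kicked);
        assert_eq!(super::wait_prog_state(Duration::from_millis(1)), super::ProgState::Running);
    }
}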
#[cfg(test)]
mod tests {
#[test]
fn test_format_duration() {
for pair in &[
(0.000003932, "3.9u"),
(0.00448, "4.5m"),
(0.3, "300m"),
(2042.0, "34.0M"),
(3456000.0, "40.0D"),
(60480000.0, "1.9Y"),
] {
let result = super::format_duration(pair.0);
assert_eq!(&result, pair.1);
println!("{} -> {} ({})", pair.0, &result, pair.1);
}
}
#[test]
fn test_parse_duration() {
for pair in &[
(0.0000039, "3.9u"),
(0.0044, "4.4m"),
(0.3, "300m"),
(2040.0, "34.0M"),
(3456000.0, "40.0D"),
(59918400.0, "1.9Y"),
(59918401.1, "1.9Y1s100m"),
(59918401.1, "1.9Y1.1s"),
(59918401.102, "1.9Y 1.1s 2000 u"),
(1.27, "1.27"),
(1.37, "100m1.27"),
] {
let result = super::parse_duration(pair.1).unwrap();
assert_eq!(pair. | random_line_split |
|
lib.rs | , Result};
use crossbeam::channel::Sender;
use glob::glob;
use log::{info, warn};
use scan_fmt::scan_fmt;
use simplelog as sl;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::fmt::Write as FmtWrite;
use std::fs;
use std::io::prelude::*;
use std::io::BufReader;
use std::mem::size_of;
use std::os::linux::fs::MetadataExt as LinuxME;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::MetadataExt as UnixME;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::sync::{atomic, Condvar, Mutex};
use std::thread_local;
use std::time::{Duration, UNIX_EPOCH};
use sysinfo::{self, SystemExt};
pub mod iocost;
pub mod journal_tailer;
pub mod json_file;
pub mod storage_info;
pub mod systemd;
pub use iocost::{IoCostModelParams, IoCostQoSParams, IoCostSysSave};
pub use journal_tailer::*;
pub use json_file::{
JsonArgs, JsonArgsHelper, JsonConfigFile, JsonLoad, JsonRawFile, JsonReportFile, JsonSave,
};
pub use storage_info::*;
pub use systemd::TransientService;
pub const TO_MSEC: f64 = 1000.0;
pub const TO_PCT: f64 = 100.0;
pub const MSEC: f64 = 1.0 / 1000.0;
lazy_static::lazy_static! {
pub static ref TOTAL_SYSTEM_MEMORY: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_memory() as usize * 1024
};
pub static ref TOTAL_SYSTEM_SWAP: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_swap() as usize * 1024
};
pub static ref NR_SYSTEM_CPUS: usize = ::num_cpus::get();
static ref TOTAL_MEMORY: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref TOTAL_SWAP: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref NR_CPUS: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub static ref PAGE_SIZE: usize = ::page_size::get();
pub static ref ROTATIONAL_SWAP: bool = storage_info::is_swap_rotational();
pub static ref IS_FB_PROD: bool = {
match glob("/sys/fs/cgroup/**/fbagentd.service")
.unwrap()
.filter_map(|x| x.ok())
.next()
{
Some(_) => {
warn!("FB PROD detected, default parameters will be adjusted");
true
}
None => false,
}
};
}
pub fn total_memory() -> usize {
match TOTAL_MEMORY.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_MEMORY,
v => v,
}
}
pub fn total_swap() -> usize {
match TOTAL_SWAP.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_SWAP,
v => v,
}
}
pub fn nr_cpus() -> usize {
match NR_CPUS.load(atomic::Ordering::Relaxed) {
0 => *NR_SYSTEM_CPUS,
v => v,
}
}
pub fn override_system_configuration(
total_memory: Option<usize>,
total_swap: Option<usize>,
nr_cpus: Option<usize>,
) {
let total_memory = total_memory.unwrap_or(0);
let total_swap = total_swap.unwrap_or(0);
let nr_cpus = nr_cpus.unwrap_or(0);
TOTAL_MEMORY.store(total_memory, atomic::Ordering::Relaxed);
TOTAL_SWAP.store(total_swap, atomic::Ordering::Relaxed);
NR_CPUS.store(nr_cpus, atomic::Ordering::Relaxed);
let mut buf = String::new();
if total_memory > 0 {
write!(
buf,
" memory={}->{}",
format_size(*TOTAL_SYSTEM_MEMORY),
format_size(total_memory)
)
.unwrap();
}
if total_swap > 0 {
write!(
buf,
" swap={}->{}",
format_size(*TOTAL_SYSTEM_SWAP),
format_size(total_swap)
)
.unwrap();
}
if nr_cpus > 0 {
write!(buf, " cpus={}->{}", *NR_SYSTEM_CPUS, nr_cpus).unwrap();
}
if buf.len() > 0 {
info!("System configuration overrides:{}", &buf);
}
}
pub fn to_gb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 30) as f64
}
pub fn to_mb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 20) as f64
}
pub fn to_kb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 10) as f64
}
pub fn scale_ratio<T>(ratio: f64, (left, mid, right): (T, T, T)) -> T
where
T: PartialOrd + num::FromPrimitive + num::ToPrimitive,
{
let (left_f64, mid_f64, right_f64) = (
left.to_f64().unwrap(),
mid.to_f64().unwrap(),
right.to_f64().unwrap(),
);
let v = if ratio < 0.5 {
left_f64 + (mid_f64 - left_f64) * ratio / 0.5
} else {
mid_f64 + (right_f64 - mid_f64) * (ratio - 0.5) / 0.5
};
num::clamp(T::from_f64(v).unwrap(), left, right)
}
fn format_size_internal<T>(size: T, zero: &str) -> String
where
T: num::ToPrimitive,
{
let format_size_helper = |size: u64, shift: u32, suffix: &str| -> Option<String> {
let unit: u64 = 1 << shift;
if size < unit {
Some(zero.to_string())
} else if size < 100 * unit {
Some(format!("{:.1}{}", size as f64 / unit as f64, suffix))
} else if size < 1024 * unit {
Some(format!("{:}{}", size / unit, suffix))
} else {
None
}
};
let size = size.to_u64().unwrap();
format_size_helper(size, 0, "B")
.or_else(|| format_size_helper(size, 10, "K"))
.or_else(|| format_size_helper(size, 20, "M"))
.or_else(|| format_size_helper(size, 30, "G"))
.or_else(|| format_size_helper(size, 40, "P"))
.or_else(|| format_size_helper(size, 50, "E"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_size<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "0")
}
pub fn format_size_dashed<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "-")
}
fn format_duration_internal(dur: f64, zero: &str) -> String {
let format_nsecs_helper = |nsecs: u64, unit: u64, max: u64, suffix: &str| -> Option<String> {
if nsecs < unit {
Some(zero.to_string())
} else if nsecs < 100 * unit {
Some(format!("{:.1}{}", nsecs as f64 / unit as f64, suffix))
} else if nsecs < max * unit {
Some(format!("{:}{}", nsecs / unit, suffix))
} else {
None
}
};
let nsecs = (dur * 1_000_000_000.0).round() as u64;
format_nsecs_helper(nsecs, 10_u64.pow(0), 1000, "n")
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(3), 1000, "u"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(6), 1000, "m"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9), 60, "s"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60, 60, "M"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60, 24, "H"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24, 365, "D"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24 * 365, 1000, "Y"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_duration(dur: f64) -> String {
format_duration_internal(dur, "0")
}
pub fn format_duration_dashed(dur: f64) -> String {
format_duration_internal(dur, "-")
}
fn format_pct_internal(ratio: f64, zero: &str) -> String {
if ratio == 0.0 {
zero.to_string()
} else if ratio > 0.99 && ratio <= 9.99 {
format!("{:3.0}", ratio * 100.0)
} else if ratio > 9.99 {
"INF".into()
} else {
format!("{:.01}", ratio * 100.0)
}
}
pub fn format_pct(ratio: f64) -> String {
format_pct_internal(ratio, "0")
}
pub fn format_pct_dashed(ratio: f64) -> String {
format_pct_internal(ratio, "-")
}
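// A sketch (test module and ratios added for exposition) of the percent
// formatter's ranges: one decimal place below ~99%, whole numbers up to
// 999%, and "INF" beyond that.
#[cfg(test)]
mod format_pct_sketch {
    #[test]
    fn pct_ranges() {
        assert_eq!(super::format_pct(0.5), "50.0");
        assert_eq!(super::format_pct(2.5), "250");
        assert_eq!(super::format_pct(10.0), "INF");
        assert_eq!(super::format_pct_dashed(0.0), "-");
    }
}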
pub fn parse_duration(input: &str) -> Result<f64> {
lazy_static::lazy_static! {
static ref UNITS: HashMap<char, f64> = [
('n', 0.000_000_001),
('u', 0.000_001),
('m', 0.001),
('s', 1.0),
('M', 60.0),
('H', 3600.0),
('D', 3600.0 * 24.0),
('Y', 3600.0 * 24.0 * 365.0),
]
.iter()
.cloned()
.collect();
}
let mut num = String::new();
let mut sum = 0.0;
for ch in input.chars() {
if UNITS.contains_key(&ch) {
sum += num.trim().parse::<f64>()? * UNITS[&ch];
num.clear();
} else {
num.push(ch);
}
}
if num.trim().len() > 0 {
sum += num.trim().parse::<f64>()?;
}
Ok(sum)
}
fn is_executable<P: AsRef<Path>>(path_in: P) -> bool {
let path = path_in.as_ref();
match path.metadata() {
Ok(md) => md.is_file() && md.mode() & 0o111 != 0,
Err(_) => false,
}
}
pub fn exe_dir() -> Result<PathBuf> {
let mut path = env::current_exe()?;
path.pop();
Ok(path)
}
pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>(
name_in: N,
prepend_in: Option<P>,
) -> Option<PathBuf> {
let name = name_in.as_ref();
let mut search = OsString::new();
if let Some(prepend) = prepend_in.as_ref() {
search.push(prepend);
search.push(":");
}
if let Some(dirs) = env::var_os("PATH") {
search.push(dirs);
}
for dir in env::split_paths(&search) {
let mut path = dir.to_owned();
path.push(name);
if let Ok(path) = path.canonicalize() {
if is_executable(&path) {
return Some(path);
}
}
}
None
}
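// A usage sketch (test module added for exposition; "sh" being on PATH is an
// assumption about the test host): find_bin searches an optional prepend
// directory and then $PATH, keeping only canonicalized executable files.
#[cfg(test)]
mod find_bin_sketch {
    #[test]
    fn resolves_from_path() {
        if let Some(path) = super::find_bin("sh", Option::<&str>::None) {
            assert!(path.is_absolute());
        }
    }
}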
pub fn chgrp<P: AsRef<Path>>(path_in: P, gid: u32) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
if md.st_gid() != gid {
let cpath = CString::new(path.as_os_str().as_bytes())?;
if unsafe { libc::chown(cpath.as_ptr(), md.st_uid(), gid) } < 0 {
bail!("Failed to chgrp {:?} to {} ({:?})", path, gid, unsafe {
*libc::__errno_location()
});
}
Ok(true)
} else {
Ok(false)
}
}
pub fn set_sgid<P: AsRef<Path>>(path_in: P) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
let mut perm = md.permissions();
if perm.mode() & 0o2000 == 0 {
perm.set_mode(perm.mode() | 0o2000);
fs::set_permissions(path, perm)?;
Ok(true)
} else {
Ok(false)
}
}
pub fn read_one_line<P: AsRef<Path>>(path: P) -> Result<String> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
Ok(r.lines().next().ok_or(anyhow!("File empty"))??)
}
pub fn write_one_line<P: AsRef<Path>>(path: P, line: &str) -> Result<()> {
let mut f = fs::OpenOptions::new().write(true).open(path)?;
Ok(f.write_all(line.as_ref())?)
}
pub fn unix_now() -> u64 {
UNIX_EPOCH.elapsed().unwrap().as_secs()
}
pub fn init_logging(verbosity: u32) {
if std::env::var("RUST_LOG").is_ok() {
env_logger::init();
} else {
let sl_level = match verbosity {
0 | 1 => sl::LevelFilter::Info,
2 => sl::LevelFilter::Debug,
_ => sl::LevelFilter::Trace,
};
let mut lcfg = sl::ConfigBuilder::new();
lcfg.set_time_level(sl::LevelFilter::Off)
.set_location_level(sl::LevelFilter::Off)
.set_target_level(sl::LevelFilter::Off)
.set_thread_level(sl::LevelFilter::Off);
if !console::user_attended_stderr()
|| sl::TermLogger::init(sl_level, lcfg.build(), sl::TerminalMode::Stderr).is_err()
{
sl::SimpleLogger::init(sl_level, lcfg.build()).unwrap();
}
}
}
pub fn child_reader_thread(name: String, stdout: process::ChildStdout, tx: Sender<String>) {
let reader = BufReader::new(stdout);
for line in reader.lines() {
match line {
Ok(line) => {
if let Err(e) = tx.send(line) {
info!("{}: Reader thread terminating ({:?})", &name, &e);
break;
}
}
Err(e) => {
warn!("{}: Failed to read from journalctl ({:?})", &name, &e);
break;
}
}
}
}
pub fn run_command(cmd: &mut Command, emsg: &str) -> Result<()> {
let cmd_str = format!("{:?}", &cmd);
match cmd.status() {
Ok(rc) if rc.success() => Ok(()),
Ok(rc) => bail!("{:?} ({:?}): {}", &cmd_str, &rc, emsg,),
Err(e) => bail!("{:?} ({:?}): {}", &cmd_str, &e, emsg,),
}
}
pub fn fill_area_with_random<T, R: rand::Rng + ?Sized>(area: &mut [T], comp: f64, rng: &mut R) {
let area = unsafe {
std::slice::from_raw_parts_mut(
std::mem::transmute::<*mut T, *mut u64>(area.as_mut_ptr()),
area.len() * size_of::<T>() / size_of::<u64>(),
)
};
const BLOCK_SIZE: usize = 512;
const WORDS_PER_BLOCK: usize = BLOCK_SIZE / size_of::<u64>();
let rands_per_block = (((WORDS_PER_BLOCK as f64) * (1.0 - comp)) as usize).min(WORDS_PER_BLOCK);
let last_first = area[0];
for i in 0..area.len() {
area[i] = if i % WORDS_PER_BLOCK < rands_per_block {
rng.gen()
} else {
0
};
}
// guarantee that the first word doesn't stay the same
if area[0] == last_first {
area[0] += 1;
}
}
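// A sketch (test module added for exposition) of the compressibility knob:
// comp is the fraction of each 512-byte block left as zeros, so comp == 1.0
// writes no random words at all and the buffer stays fully compressible.
#[cfg(test)]
mod fill_random_sketch {
    #[test]
    fn fully_compressible_stays_zero() {
        let mut area = [1u64; 128]; // two 512-byte blocks
        let mut rng = rand::thread_rng();
        super::fill_area_with_random(&mut area, 1.0, &mut rng);
        // rands_per_block is 0, so every word past the nudged first one is 0.
        assert!(area.iter().skip(1).all(|&w| w == 0));
    }
}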
pub fn read_cgroup_flat_keyed_file(path: &str) -> Result<HashMap<String, u64>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
if let Ok((key, val)) = scan_fmt!(&line, "{} {d}", String, u64) {
map.insert(key, val);
}
}
Ok(map)
}
pub fn read_cgroup_nested_keyed_file(
path: &str,
) -> Result<HashMap<String, HashMap<String, String>>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut top_map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
let mut split = line.split_whitespace();
let top_key = split.next().unwrap();
let mut map = HashMap::new();
for tok in split {
if let Ok((key, val)) = scan_fmt!(tok, "{}={}", String, String) {
map.insert(key, val);
}
}
top_map.insert(top_key.into(), map);
}
Ok(top_map)
}
struct GlobalProgState {
exiting: bool,
kick_seq: u64,
}
lazy_static::lazy_static! {
static ref PROG_STATE: Mutex<GlobalProgState> = Mutex::new(GlobalProgState {
exiting: false,
kick_seq: 1
});
static ref PROG_WAITQ: Condvar = Condvar::new();
}
thread_local! {
static LOCAL_KICK_SEQ: RefCell<u64> = RefCell::new(0);
}
pub fn setup_prog_state() {
ctrlc::set_handler(move || {
info!("SIGINT/TERM received, exiting...");
set_prog_exiting();
})
.expect("Error setting term handler");
}
pub fn set_prog_exiting() {
PROG_STATE.lock().unwrap().exiting = true;
PROG_WAITQ.notify_all();
}
pub fn prog_exiting() -> bool {
PROG_STATE.lock().unwrap().exiting
}
pub fn prog_kick() {
PROG_STATE.lock().unwrap().kick_seq += 1;
PROG_WAITQ.notify_all();
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum | {
Running,
Exiting,
Kicked,
}
pub fn wait_prog_state(dur: Duration) -> ProgState {
let mut first = true;
let mut state = PROG_STATE.lock().unwrap();
loop {
if state.exiting {
return ProgState::Exiting;
}
if LOCAL_KICK_SEQ.with(|seq| {
if *seq.borrow() < state.kick_seq {
*seq.borrow_mut() = state.kick_seq;
true
} else {
false
}
}) {
return ProgState::Kicked;
}
if first {
state = PROG_WAITQ.wait_timeout(state, dur).unwrap().0;
first = false;
} else {
return ProgState::Running;
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_format_duration() {
for pair in &[
(0.000003932, "3.9u"),
(0.00448, "4.5m"),
(0.3, "300m"),
(2042.0, "34.0M"),
(3456000.0, "40.0D"),
(60480000.0, "1.9Y"),
] {
let result = super::format_duration(pair.0);
assert_eq!(&result, pair.1);
println!("{} -> {} ({})", pair.0, &result, pair.1);
}
}
#[test]
fn test_parse_duration() {
for pair in &[
(0.0000039, "3.9u"),
(0.0044, "4.4m"),
(0.3, "300m"),
(2040.0, "34.0M"),
(3456000.0, "40.0D"),
(59918400.0, "1.9Y"),
(59918401.1, "1.9Y1s100m"),
(59918401.1, "1.9Y1.1s"),
(59918401.102, "1.9Y 1.1s 2000 u"),
(1.27, "1.27"),
(1.37, "100m1.27"),
] {
let result = super::parse_duration(pair.1).unwrap();
assert_eq!(pair. | ProgState | identifier_name |
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here;
// a vector of vectors will not even stay in cache.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &V | <u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr!= 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
// // Want to return whether the key was found, plus a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
// time_range is depends on encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequence_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequence_count += 1;
if sequence_count >= time_range {
return true
}
} else {
sequence_count = 0;
}
}
}
return false
}
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
pub struct TrajectoryHash {
byte_length: usize,
pub mask_lists: [Vec<u128>; 3], // ascending order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask;
break;
} else {
updated |= mask;
}
}
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
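// A sketch (test module and tiny parameters added for exposition; the real
// index uses new(7, 20, 16)) of the interleaved layout: with geo_length 2 and
// time_length 2 the bits cycle lng,lat,time from the LSB, so a +1 move in
// time touches only the time masks 0b001 and 0b001000, carrying between them.
#[cfg(test)]
mod trajectory_hash_sketch {
    use super::TrajectoryHash;

    #[test]
    fn time_step_carries_between_time_bits() {
        let th = TrajectoryHash::new(1, 2, 2);
        // Time bits are 00: +1 just sets the lowest time mask.
        assert_eq!(th.calc(0b000000, [0, 0, 1]), 0b000001);
        // Lowest time bit is set: +1 clears it and carries into the next one.
        assert_eq!(th.calc(0b000001, [0, 0, 1]), 0b001000);
    }
}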
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << 8*(digit - i);
}
output
}
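// A round-trip sketch (test module and bytes added for exposition):
// read_be_u128 and u128_to_bytes (defined just below) are inverses for a
// fixed byte_length, which is how hash values travel as key bytes.
#[cfg(test)]
mod be_bytes_sketch {
    #[test]
    fn round_trips_seven_bytes() {
        let bytes = vec![0x01u8, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07];
        let value = super::read_be_u128(&bytes);
        assert_eq!(super::u128_to_bytes(value, 7), bytes);
    }
}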
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16-byte_length..].to_vec()
} | ec<Vec | identifier_name |
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here;
// a vector of vectors will not even stay in cache.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &Vec<Vec<u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr!= 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
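// The traversal above pins down `key_id` and the depth `level` reached inside
// the trie; the remaining key bytes must then match the stored suffix
// byte-for-byte for an exact hit.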
// // Want to return whether the key was found, plus a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
// time_range depends on the encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequence_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequence_count += 1;
if sequence_count >= time_range {
return true
}
} else {
sequence_count = 0;
}
}
return false
}
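// Reading of the loop above: only `time_range` consecutive hits count as a
// match, and any miss resets the run; this amounts to an existence query over
// consecutive steps of an encoded trajectory.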
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
pub struct TrajectoryHash {
byte_length: usize,
pub mask_lists: [Vec<u128>; 3], // ascend order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask;
break;
} else {
updated |= mask;
}
}
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
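// Illustrative sanity checks (a minimal sketch; the expected values follow
// directly from the mask construction above): bits are interleaved
// lng/lat/time from the LSB, and `calc` ripples a +1/-1 step through the
// masks of one dimension like a carry/borrow.
#[cfg(test)]
mod trajectory_hash_tests {
    use super::TrajectoryHash;

    #[test]
    fn masks_interleave_from_lsb() {
        // geo_length = 2, time_length = 1: one 3-bit lng/lat/time group,
        // then a 2-bit geo-only group.
        let th = TrajectoryHash::new(1, 2, 1);
        assert_eq!(th.mask_lists[0], vec![0b00100, 0b10000]); // longitude bits
        assert_eq!(th.mask_lists[1], vec![0b00010, 0b01000]); // latitude bits
        assert_eq!(th.mask_lists[2], vec![0b00001]); // time bits
    }

    #[test]
    fn calc_steps_one_cell() {
        let th = TrajectoryHash::new(1, 1, 1);
        assert_eq!(th.calc(0b000, [1, 0, 0]), 0b100); // +1 in longitude
        assert_eq!(th.calc(0b000, [0, 1, 0]), 0b010); // +1 in latitude
        assert_eq!(th.calc(0b111, [0, 0, -1]), 0b110); // -1 in time
    }
}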
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << (8 * (digit - i));
}
output
}
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16 - byte_length..].to_vec()
}
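// A round-trip sketch for the big-endian helpers above; the value must fit in
// `byte_length` bytes for the inverse to hold.
#[cfg(test)]
mod codec_tests {
    use super::{read_be_u128, u128_to_bytes};

    #[test]
    fn be_bytes_round_trip() {
        let v: u128 = 0x0102_0304_0506_07;
        let bytes = u128_to_bytes(v, 7);
        assert_eq!(bytes.len(), 7);
        assert_eq!(read_be_u128(&bytes), v);
    }
}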
// verifier.rs
use argon2::{defaults, Argon2, ParamErr, Variant, Version};
use std::error::Error;
// The main export here is `Encoded`. See `examples/verify.rs` for usage
// examples.
use std::{fmt, str};
macro_rules! maybe {
($e: expr) => {
match $e {
None => return None,
Some(v) => v,
}
};
}
const LUT64: &'static [u8; 64] =
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
fn lut(n: u8) -> u8 {
LUT64[n as usize & 0x3f]
}
fn delut(c: u8) -> Option<u8> {
match c {
43 => Some(62),
47 => Some(63),
_ if 65 <= c && c <= 90 => Some(c - 65),
_ if 97 <= c && c <= 122 => Some(c - 71),
_ if 48 <= c && c <= 57 => Some(c + 4),
_ => None,
}
}
fn quad(n: &[u8]) -> [u8; 4] {
assert_eq!(n.len(), 3);
let (b, c) = (n[1] >> 4 | n[0] << 4, n[2] >> 6 | n[1] << 2);
[lut(n[0] >> 2), lut(b), lut(c), lut(n[2])]
}
fn triplet(n: &[u8]) -> Option<[u8; 3]> {
assert_eq!(n.len(), 4);
let a = maybe!(delut(n[0]));
let b = maybe!(delut(n[1]));
let c = maybe!(delut(n[2]));
let d = maybe!(delut(n[3]));
Some([a << 2 | b >> 4, b << 4 | c >> 2, c << 6 | d])
}
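// Sketch of the invariants the helpers above maintain: `delut` inverts `lut`
// over the 6-bit range, and `triplet` inverts `quad` for any 3-byte group.
#[cfg(test)]
mod lut_quad_tests {
    use super::{delut, lut, quad, triplet};

    #[test]
    fn delut_inverts_lut() {
        for n in 0u8..64 {
            assert_eq!(delut(lut(n)), Some(n));
        }
    }

    #[test]
    fn triplet_inverts_quad() {
        let group = [0x12u8, 0x34, 0x56];
        assert_eq!(triplet(&quad(&group)), Some(group));
    }
}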
fn base64_no_pad(bytes: &[u8]) -> Vec<u8> {
let mut rv = vec![];
let mut pos = 0;
while pos + 3 <= bytes.len() {
rv.extend_from_slice(&quad(&bytes[pos..pos + 3]));
pos += 3;
}
if bytes.len() - pos == 1 {
rv.push(lut(bytes[pos] >> 2));
rv.push(lut((bytes[pos] & 0x03) << 4));
} else if bytes.len() - pos == 2 {
rv.extend_from_slice(&quad(&[bytes[pos], bytes[pos + 1], 0]));
rv.pop();
}
rv
}
fn debase64_no_pad(bytes: &[u8]) -> Option<Vec<u8>> {
if bytes.len() % 4 != 1 && bytes.len() > 0 {
let mut rv = vec![];
let mut pos = 0;
while pos + 4 <= bytes.len() {
let s = maybe!(triplet(&bytes[pos..pos + 4]));
rv.extend_from_slice(&s);
pos += 4;
}
if bytes.len() - pos == 2 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
rv.push(a << 2 | b >> 4);
} else if bytes.len() - pos == 3 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
let c = maybe!(delut(bytes[pos + 2]));
rv.push(a << 2 | b >> 4);
rv.push(b << 4 | c >> 2);
}
Some(rv)
} else {
None
}
}
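// Unpadded base64 can never have length 4k + 1, hence the rejection above;
// empty input is treated as invalid here as well.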
struct Parser<'a> {
enc: &'a [u8],
pos: usize,
}
impl<'a> fmt::Debug for Parser<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?;
write!(f, "<-- {} -->", self.pos)?;
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?;
Ok(())
}
}
type Parsed<T> = Result<T, usize>;
impl<'a> Parser<'a> {
fn expect(&mut self, exp: &[u8]) -> Parsed<()> {
assert!(self.pos < self.enc.len());
if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp
{
self.err()
} else {
self.pos += exp.len();
Ok(())
}
}
fn read_until(&mut self, stopchar: u8) -> &'a [u8] {
let start = self.pos;
let stop = |c: &u8| *c == stopchar;
self.pos = match self.enc[self.pos..].iter().position(stop) {
None => self.enc.len() - 1,
Some(end) => self.pos + end,
};
&self.enc[start..self.pos]
}
fn read_u32(&mut self) -> Parsed<u32> {
let is_digit = |c: u8| 48 <= c && c <= 57;
let mut end = self.pos;
while end < self.enc.len() && is_digit(self.enc[end]) {
end += 1;
}
match str::from_utf8(&self.enc[self.pos..end]) {
Err(_) => self.err(),
Ok(s) => match s.parse() {
Err(_) => self.err(),
Ok(n) => {
self.pos = end;
Ok(n)
}
},
}
}
fn read_version(&mut self) -> Parsed<Version> {
self.read_u32().and_then(|vers| match vers {
0x10 => Ok(Version::_0x10),
0x13 => Ok(Version::_0x13),
_ => self.err(),
})
}
fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> {
let end = self.enc[self.pos..]
.iter()
.position(|c| char_set.contains(c))
.map(|sub_pos| self.pos + sub_pos)
.unwrap_or_else(|| self.enc.len());
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> {
let end = match stopchar {
None => self.enc.len(),
Some(c) => {
self.enc[self.pos..]
.iter()
.take_while(|k| **k != c)
.fold(0, |c, _| c + 1)
+ self.pos
}
};
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn err<T>(&self) -> Parsed<T> {
Err(self.pos)
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum DecodeError {
/// Byte position of first parse error
ParseError(usize),
/// Invalid Argon2 parameters given in encoding
InvalidParams(ParamErr),
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DecodeError::*;
match *self {
ParseError(pos) => write!(f, "Parse error at position {}", pos),
InvalidParams(ref perr) => {
write!(f, "Invalid hash parameters given by encoded: {}", perr)
}
}
}
}
impl Error for DecodeError {
fn description(&self) -> &str {
match *self {
DecodeError::ParseError(_) => "Hash string parse error.",
DecodeError::InvalidParams(ref perr) => perr.description(),
}
}
}
/// Represents a single Argon2 hashing session. A hash session comprises the
/// hash algorithm parameters, salt, key, and data used to hash a given input.
#[derive(Debug, Eq, PartialEq)]
pub struct Encoded {
params: Argon2,
hash: Vec<u8>,
salt: Vec<u8>,
key: Vec<u8>,
data: Vec<u8>,
}
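// Tuple layout mirrors `Encoded::parse` below:
// (variant, version, memory KiB, passes, lanes, key, data, salt, hash).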
type Packed = (
Variant,
Version,
u32,
u32,
u32,
Vec<u8>,
Vec<u8>,
Vec<u8>,
Vec<u8>,
);
impl Encoded {
fn parse(encoded: &[u8]) -> Result<Packed, usize> {
let mut p = Parser {
enc: encoded,
pos: 0,
};
p.expect(b"$argon2")?;
let variant = match p.read_until(b'$') {
b"d" => Variant::Argon2d,
b"i" => Variant::Argon2i,
b"id" => Variant::Argon2id,
x => return Err(p.pos - x.len()),
};
p.expect(b"$")?;
let vers = match p.expect(b"v=") {
// Match the c reference impl's behavior, which defaults to a v0x10
// hash encoding since the `v=` field was only introduced with
// v0x13.
Err(_) => Version::_0x10,
Ok(()) => {
let vers = p.read_version()?;
p.expect(b",")?;
vers
}
};
p.expect(b"m=")?;
let kib = p.read_u32()?;
p.expect(b",t=")?;
let passes = p.read_u32()?;
p.expect(b",p=")?;
let lanes = p.read_u32()?;
let key = match p.expect(b",keyid=") {
Err(_) => vec![],
Ok(()) => p.decode64_till_one_of(b",$")?,
};
let data = match p.expect(b",data=") {
Ok(()) => p.decode64_till(Some(b'$'))?,
Err(_) => vec![],
};
p.expect(b"$")?;
let salt = p.decode64_till(Some(b'$'))?;
p.expect(b"$")?;
let hash = p.decode64_till(None)?;
Ok((variant, vers, kib, passes, lanes, key, data, salt, hash))
}
/// Reconstruct a previous hash session from serialized bytes.
pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> {
match Self::parse(encoded) {
Err(pos) => Err(DecodeError::ParseError(pos)),
Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => {
match Argon2::with_version(passes, lanes, kib, v, vers) {
Err(e) => Err(DecodeError::InvalidParams(e)),
Ok(a2) => Ok(Encoded {
params: a2,
hash: hash,
salt: salt,
key: key,
data: data,
}),
}
}
}
}
/// Serialize this hashing session into raw bytes that can later be
/// recovered by `Encoded::from_u8`.
pub fn to_u8(&self) -> Vec<u8> {
let vcode = |v| match v {
Variant::Argon2i => "i",
Variant::Argon2d => "d",
Variant::Argon2id => "id",
};
let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap();
let k_ = match &b64(&self.key[..]) {
bytes if bytes.len() > 0 => format!(",keyid={}", bytes),
_ => String::new(),
};
let x_ = match &b64(&self.data[..]) {
bytes if bytes.len() > 0 => format!(",data={}", bytes),
_ => String::new(),
};
let (var, m, t, p, vers) = self.params();
format!(
"$argon2{}$v={},m={},t={},p={}{}{}${}${}",
vcode(var),
vers as usize,
m,
t,
p,
k_,
x_,
b64(&self.salt[..]),
b64(&self.hash)
)
.into_bytes()
}
/// Generates a new hashing session from password, salt, and other byte
/// input. Parameters are:
///
/// `argon`: An `Argon2` struct representative of the desired hash algorithm
/// parameters.
///
/// `p`: Password input.
///
/// `s`: Salt.
///
/// `k`: An optional secret value.
///
/// `x`: Optional, miscellaneous associated data.
///
/// Note that `p, s, k, x` must conform to the same length constraints
/// dictated by `Argon2::hash`.
pub fn new(argon: Argon2, p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
let mut out = vec![0 as u8; defaults::LENGTH];
argon.hash(&mut out[..], p, s, k, x);
Encoded {
params: argon,
hash: out,
salt: s.iter().cloned().collect(),
key: k.iter().cloned().collect(),
data: x.iter().cloned().collect(),
}
}
/// Same as `Encoded::new`, but with the default Argon2i hash algorithm
/// parameters.
pub fn default2i(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2i), p, s, k, x)
}
/// Same as `Encoded::new`, but with the default _Argon2d_ hash algorithm
/// parameters.
pub fn default2d(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2d), p, s, k, x)
}
/// Verifies password input against the hash that was previously created in
/// this hashing session.
pub fn verify(&self, p: &[u8]) -> bool {
let mut out = [0 as u8; defaults::LENGTH];
let s = &self.salt[..];
self.params
.hash(&mut out, p, s, &self.key[..], &self.data[..]);
constant_eq(&out, &self.hash)
}
/// Provides read-only access to the Argon2 parameters of this hash.
pub fn params(&self) -> (Variant, u32, u32, u32, Version) {
self.params.params()
}
}
/// Compares two byte arrays for equality in constant time with respect to
/// their contents; differing lengths return `false` immediately.
#[inline(never)]
pub fn constant_eq(xs: &[u8], ys: &[u8]) -> bool {
if xs.len() != ys.len() {
false
} else {
let rv = xs.iter().zip(ys.iter()).fold(0, |rv, (x, y)| rv | (x ^ y));
// this kills the optimizer.
(1 & (rv as u32).wrapping_sub(1) >> 8).wrapping_sub(1) == 0
}
}
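// A quick behavioral check: `constant_eq` agrees with `==` on slices while
// avoiding a data-dependent early exit in the equal-length path.
#[cfg(test)]
mod constant_eq_tests {
    use super::constant_eq;

    #[test]
    fn agrees_with_slice_eq() {
        assert!(constant_eq(b"same bytes", b"same bytes"));
        assert!(!constant_eq(b"same bytes", b"diff bytes"));
        assert!(!constant_eq(b"short", b"longer input"));
    }
}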
#[cfg(test)]
mod test {
use super::{base64_no_pad, debase64_no_pad, Encoded};
const BASE64_CASES: [(&'static [u8], &'static [u8]); 5] = [
(b"any carnal pleasure.", b"YW55IGNhcm5hbCBwbGVhc3VyZS4"),
(b"any carnal pleasure", b"YW55IGNhcm5hbCBwbGVhc3VyZQ"),
(b"any carnal pleasur", b"YW55IGNhcm5hbCBwbGVhc3Vy"),
(b"any carnal pleasu", b"YW55IGNhcm5hbCBwbGVhc3U"),
(b"any carnal pleas", b"YW55IGNhcm5hbCBwbGVhcw"),
];
const ENCODED: &'static [&'static [u8]] = &[
b"$argon2i$m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
// ^ ensures that default version is 0x10.
b"$argon2i$v=16,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
b"$argon2i$v=19,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$AvsXI+N78kGHzeGwzz0VTjfBdl7MmgvBGfJ/XXyqLbA",
];
#[test]
fn test_base64_no_pad() {
for &(s, exp) in BASE64_CASES.iter() {
assert_eq!(&base64_no_pad(s)[..], exp);
}
}
#[test]
fn test_debase64_no_pad() {
for &(exp, s) in BASE64_CASES.iter() {
assert_eq!(debase64_no_pad(s).unwrap(), exp);
}
}
#[test]
fn test_verify() {
for &hash_string in ENCODED {
let v = Encoded::from_u8(hash_string).unwrap();
assert_eq!(v.verify(b"argon2i!"), true);
assert_eq!(v.verify(b"nope"), false);
}
}
#[test]
fn encode_decode() {
for &(s, _) in BASE64_CASES.iter() {
let salt = b"Yum! Extra salty";
let key = b"ff5dfa4d7a048f9db4ad0caad82e75c";
let enc = Encoded::default2i(s, salt, key, &[]);
assert_eq!(Encoded::from_u8(&enc.to_u8()), Ok(enc));
}
}
#[test]
fn bad_encoded() {
use super::DecodeError::*;
use argon2::ParamErr::*;
let cases: &[(&'static [u8], super::DecodeError)] = &[
(b"$argon2y$v=19,m=4096", ParseError(7)),
(
b"$argon2i$v=19,m=-2,t=-4,p=-4$aaaaaaaa$ffffff",
ParseError(16),
),
// ^ negative m is invalid.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff*",
ParseError(35),
),
// ^ asterisk is invalid base64 char.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff",
InvalidParams(TooFewPasses),
),
// ^ p = 0 is invalid.
(b"$argon2i$m", ParseError(9)),
];
// ^ intentionally fail Encoded::expect with undersized input
for &(case, err) in cases.iter() {
let v = Encoded::from_u8(case);
assert!(v.is_err());
assert_eq!(v.err().unwrap(), err);
}
}
}
// disk.rs
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
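// A minimal sketch (not part of the original source) of the property the
// `DiskGetLen for File` impl above relies on: the length is found by seeking to
// the end, and the caller's cursor position is restored before returning. The
// `tempfile` crate used here is an assumption for illustration only.
#[cfg(test)]
mod get_len_example {
    use super::DiskGetLen;
    use std::io::{Seek, SeekFrom, Write};

    #[test]
    fn get_len_preserves_cursor() {
        let mut file = tempfile::tempfile().unwrap();
        file.write_all(b"hello world").unwrap();
        file.seek(SeekFrom::Start(5)).unwrap();
        // The length is reported correctly...
        assert_eq!(file.get_len().unwrap(), 11);
        // ...and the cursor is back where it was before the call.
        assert_eq!(file.seek(SeekFrom::Current(0)).unwrap(), 5);
    }
}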
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
/// Converts a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
/// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
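// A hedged sketch (not in the original source) of the sync/async round trip the
// doc comment above describes: a plain `File` becomes an `AsyncDisk` for use on
// the block thread, then `into_inner` recovers the `Send` DiskFile when the
// device is reset. The path and the error variant used for `open` are
// placeholders chosen for illustration.
#[cfg(any())] // illustration only; not compiled
fn async_disk_round_trip(ex: &Executor) -> Result<Box<dyn DiskFile>> {
    let file = File::open("/path/to/disk.img").map_err(Error::ReadingData)?;
    // `Send` DiskFile -> non-`Send` AsyncDisk on the block thread...
    let async_disk: Box<dyn AsyncDisk> = Box::new(file).to_async_disk(ex)?;
    // ...and back when the block device is destroyed or reset.
    Ok(async_disk.into_inner())
}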
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
// This cast is safe since we know disk_size is less than or equal to
// MAGIC_BLOCK_SIZE (4096) and therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
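// A minimal sketch (not part of the original source, and assuming the
// `tempfile` crate) of how `detect_image_type` keys off magic bytes: a file
// beginning with the qcow2 magic ("QFI\xfb", stored big-endian) is reported as
// Qcow2 when the `qcow` feature is enabled; unrecognized headers fall back to
// `ImageType::Raw`.
#[cfg(all(test, feature = "qcow"))]
mod detect_image_type_example {
    use super::*;
    use std::io::Write;

    #[test]
    fn detects_qcow2_magic() {
        let mut file = tempfile::tempfile().unwrap();
        file.write_all(&QCOW_MAGIC.to_be_bytes()).unwrap();
        assert_eq!(detect_image_type(&file).unwrap(), ImageType::Qcow2);
    }
}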
/// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool,
// max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => {
sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?;
Box::new(raw_image) as Box<dyn DiskFile>
}
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
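// A hedged usage sketch (not in the original source): open an image and let
// `create_disk_file` pick the matching backend. `MAX_NESTING_DEPTH` bounds how
// deep a chain of nested disk files (e.g. a composite disk referencing other
// images) may go; each nested open passes `max_nesting_depth - 1` back in.
#[cfg(any())] // illustration only; not compiled
fn open_disk(path: &Path) -> Result<Box<dyn DiskFile>> {
    let raw_image = File::open(path).map_err(Error::ReadingData)?;
    create_disk_file(raw_image, /* is_sparse_file= */ true, MAX_NESTING_DEPTH, path)
}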
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
/// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn | (&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
}
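// The fallback above, restated as a hedged synchronous sketch (not part of the
// original source): when fallocate(ZeroRange) is unsupported, zeroes are written
// in chunks of at most 64 KiB (0x10000) until `length` bytes are covered.
#[cfg(any())] // illustration only; not compiled
fn write_zeroes_fallback(file: &mut File, mut offset: u64, length: u64) -> io::Result<()> {
    use std::io::{Seek, Write};
    let buf = [0u8; 0x10000]; // one 64 KiB block of zeroes, reused per chunk
    let mut remaining = length;
    while remaining > 0 {
        let chunk = remaining.min(buf.len() as u64) as usize;
        file.seek(SeekFrom::Start(offset))?;
        file.write_all(&buf[..chunk])?;
        offset += chunk as u64;
        remaining -= chunk as u64;
    }
    Ok(())
}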
| write_zeroes_at | identifier_name |
disk.rs | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
/// Converts a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
/// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
// This cast is safe since we know disk_size is less than or equal to
// MAGIC_BLOCK_SIZE (4096) and therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
| // max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => {
sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?;
Box::new(raw_image) as Box<dyn DiskFile>
}
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
/// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
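// A hedged sketch (not in the original source) of wiring a `SingleFileDisk` to
// an executor and driving one async operation to completion; `Executor::new`
// and `run_until` are assumed to behave as elsewhere in crosvm.
#[cfg(any())] // illustration only; not compiled
fn fsync_once(file: File) -> Result<()> {
    let ex = Executor::new().expect("failed to create executor");
    let disk = SingleFileDisk::new(file, &ex)?;
    ex.run_until(disk.fsync()).expect("executor failed")
}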
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
} | /// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool, | random_line_split |
disk.rs | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
/// Converts a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
/// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
// This cast is safe since we know disk_size is less than or equal to
// MAGIC_BLOCK_SIZE (4096) and therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
/// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool,
// max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => |
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
/// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
}
| {
sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?;
Box::new(raw_image) as Box<dyn DiskFile>
} | conditional_block |
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type (... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if,...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must be assigning to a variable via '='. Example:'remember key = value'";
pub const ERROR_USE: &str =
"'use' must be assigning a variable with keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return,...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ##Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 characters";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
"[Infinite loop] Step limit reached: 100 steps were executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App can not be called because apps_endpoint is not set in bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3,..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
"Invalid operation: constants can not execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
"Constant invalid expression type: constants can not be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closure don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
"[pow] takes one parameter of type int or float usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
"[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
"[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
"[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.start_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
".slice(start, optional<end>) args need to be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
".slice(start, optional<end>) end needs to be greater than start, e.g. .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expect argument of type String";
pub const ERROR_HASH_ALGO: &str =
"Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key need to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is use incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
"JWT(claims) command expect argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
"JWT(claims).sign(algo, secret, Optional<Header>) expect first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expect argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
"JWT(jwt).decode(algo, secret) expect first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str =
"JWT(jwt).decode(algo, secret) expect second argument 'secret' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
"JWT(jwt).verify(claims, algo, secret) expect first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
"JWT(jwt).verify(claims, algo, secret) expect second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
"JWT(jwt).verify(claims, algo, secret) expect third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is bad formatted read doc for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
"[insert] take tow arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
"[assign] take one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
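// A minimal sketch (not part of the original source) of the message shape built
// above: the offending line, a caret under the offending column, then the error
// text. `Span::new` is assumed to be the usual nom_locate constructor. For a
// flow containing `say 42..`, the output looks roughly like:
//
//     at line 1,
//     say 42..
//         ^
//     unexpected token
//
#[cfg(test)]
mod context_message_example {
    use super::*;

    #[test]
    fn caret_lands_on_reported_column() {
        let flow = Span::new("say 42..");
        let msg = add_context_to_error_message(flow, "unexpected token".to_owned(), 1, 5, 4);
        assert!(msg.contains("at line 1"));
        assert!(msg.contains("say 42.."));
        assert!(msg.contains("unexpected token"));
    }
}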
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTIONS
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
|
pub fn gen_nom_failure<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// Count the number of newlines in the first `offset` bytes of input
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// Count the number of newlines in the first `offset` bytes of input
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
}
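// A hedged sketch (not in the original source) of the output format: each
// (flow, step) pair in the recorded trace becomes one "[flow] ..., [step] ..."
// line, so a loop that bounced twice through the same step renders as two lines.
#[cfg(test)]
mod infinite_loop_msg_example {
    use super::gen_infinite_loop_error_msg;

    #[test]
    fn formats_one_line_per_hop() {
        let trace = vec![
            ("start".to_owned(), "step_1".to_owned()),
            ("start".to_owned(), "step_1".to_owned()),
        ];
        assert_eq!(
            gen_infinite_loop_error_msg(trace),
            "[flow] start, [step] step_1\n[flow] start, [step] step_1\n"
        );
    }
}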
| {
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
} | identifier_body |
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type (... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if,...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must be assigning to a variable via '='. Example:'remember key = value'";
pub const ERROR_USE: &str =
"'use' must be assigning a variable with keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return,...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ##Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 characters";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
"[Infinite loop] Step limit reached: 100 steps where executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App cannot be called because apps_endpoint is not set in the bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3,..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
"Invalid operation: constants cannot execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
"Constant invalid expression type: constants cannot be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closures don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
"[pow] takes one parameter of type int or float usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
"[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
"[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
"[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.starts_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
".slice(start, optional<end>) args must be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
".slice(start, optional<end>) end must be greater than start, e.g. .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expects an argument of type String";
pub const ERROR_HASH_ALGO: &str =
"Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key needs to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is used incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
"JWT(claims) command expects argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
"JWT(claims).sign(algo, secret, Optional<Header>) expects first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expects second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expects argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
"JWT(jwt).decode(algo, secret) expects first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str = | "JWT(jwt).decode(algo, secret) expects second argument 'secret' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
"JWT(jwt).verify(claims, algo, secret) expects first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
"JWT(jwt).verify(claims, algo, secret) expects second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
"JWT(jwt).verify(claims, algo, secret) expects third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP object is badly formatted; read the docs for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
"[insert] takes two arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
"[assign] takes one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTION
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
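// The sketch below (added for illustration; not part of the original file)
// demonstrates the `{caret:>column$}` trick used above: right-aligning '^'
// in a field of `column` characters places it under the offending column of
// the echoed source line.
#[cfg(test)]
mod caret_format_sketch {
#[test]
fn caret_is_right_aligned_to_column() {
use std::fmt::Write;
let mut s = String::new();
write!(&mut s, "{caret:>column$}", caret = '^', column = 4).unwrap();
assert_eq!(s, "   ^");
}
}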
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
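// Note (added for clarity): nom treats `Err::Error` as recoverable, so
// combinators such as `alt` will backtrack and try another branch, whereas
// `Err::Failure` (generated below) aborts parsing immediately. Use
// `gen_nom_error` inside alternatives and `gen_nom_failure` once enough
// input has been consumed that no other branch could succeed.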
pub fn gen_nom_failure<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// The (1-indexed) line number as tracked by the located span
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// The (1-indexed) line number recorded in the interval
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
} | random_line_split |
|
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type (... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if,...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must assign to a variable via '='. Example: 'remember key = value'";
pub const ERROR_USE: &str =
"'use' must assign a variable with the keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return,...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ##Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 characters";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
"[Infinite loop] Step limit reached: 100 steps where executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App cannot be called because apps_endpoint is not set in the bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3,..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
"Invalid operation: constants cannot execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
"Constant invalid expression type: constants cannot be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closures don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
"[pow] takes one parameter of type int or float usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
"[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
"[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
"[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.starts_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true'";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
".slice(start, optional<end>) args must be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
".slice(start, optional<end>) end must be greater than start, e.g. .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expects an argument of type String";
pub const ERROR_HASH_ALGO: &str =
"Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key needs to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is used incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
"JWT(claims) command expects argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
"JWT(claims).sign(algo, secret, Optional<Header>) expects first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expects second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expects argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
"JWT(jwt).decode(algo, secret) expects first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str =
"JWT(jwt).decode(algo, secret) expects second argument 'secret' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
"JWT(jwt).verify(claims, algo, secret) expects first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
"JWT(jwt).verify(claims, algo, secret) expects second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
"JWT(jwt).verify(claims, algo, secret) expects third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP object is badly formatted; read the docs for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
"[insert] takes two arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
"[assign] takes one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTION
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn | <'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// The (1-indexed) line number as tracked by the located span
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// The (1-indexed) line number recorded in the interval
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
}
| gen_nom_failure | identifier_name |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
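// A minimal sketch (hypothetical test, not in the original file) of why the
// XOR masking above is reversible: applying the same keystream twice restores
// the original bytes, which is what `save_private_context` and
// `get_private_context` below rely on.
#[cfg(test)]
mod xor_masking_sketch {
#[test]
fn xor_twice_is_identity() {
let key = [0x5a_u8; 32];
let secret = [0x42_u8; 32];
// Mask, then unmask with the same key: the round trip is the identity.
let masked: Vec<u8> = secret.iter().zip(key.iter()).map(|(s, k)| s ^ k).collect();
let unmasked: Vec<u8> = masked.iter().zip(key.iter()).map(|(m, k)| m ^ k).collect();
assert_eq!(unmasked.as_slice(), &secret[..]);
}
}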
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
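// Comment added for clarity (an assumption based on usage in this file):
// `seed_file_exists` appears to return Err when a seed file is already
// present, so `is_err()` here means "a seed exists".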
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
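// Hypothetical usage sketch (added note; method names as defined in this
// trait): the expected lifecycle mirrors the guard clauses `connect` checks
// above:
//
// let mut backend = Backend::new(&config, client)?;
// backend.set_seed(None, password.clone(), false)?; // or set_password(...)
// backend.connect()?;
// backend.open_with_credentials()?;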
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
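// Minimal sketch (hypothetical, not in the original file) of the counter
// behaviour `next_child` implements: read the last derivation index
// (defaulting to 0), extend the parent path with it, then persist index + 1.
#[cfg(test)]
mod next_child_sketch {
#[test]
fn counter_starts_at_zero_and_increments() {
let mut stored: Option<u32> = None; // stands in for the DERIV_PREFIX db entry
let mut next = || {
let idx = stored.unwrap_or(0);
stored = Some(idx + 1);
idx
};
assert_eq!(next(), 0);
assert_eq!(next(), 1);
}
}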
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
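// Hypothetical usage sketch (added note; API as defined below): mutations
// are staged on the wrapped grin_store batch and only become durable when
// `commit` consumes it; dropping the `Batch` without committing discards
// every staged change.
//
// let mut batch = wallet.batch()?;
// batch.save_output(&out)?;
// batch.save_tx_log_entry(&entry)?;
// batch.commit()?;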
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
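// Note (added for clarity): `get_stored_tx` above reverses this exactly,
// reading the file, decoding with `from_hex`, then `ser::deserialize` with
// ProtocolVersion(1); the hex encoding and protocol version here must stay
// in sync with it.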
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn | (&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| next_tx_log_id | identifier_name |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
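// A minimal sketch (hypothetical test, not part of the original file) of why
// this masking works: XOR with a fixed key is an involution, so the same
// derived keys that save_private_context uses to mask sec_key and sec_nonce
// let get_private_context recover them unchanged.
#[cfg(test)]
mod xor_mask_sketch {
    #[test]
    fn xor_mask_round_trips() {
        let xor_key = [0xA5u8; 32];
        let original = [0x42u8; 32];
        let mut masked = original;
        for i in 0..32 {
            masked[i] ^= xor_key[i]; // mask before persisting
        }
        assert_ne!(masked, original);
        for i in 0..32 {
            masked[i] ^= xor_key[i]; // unmask after reading back
        }
        assert_eq!(masked, original);
    }
}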
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
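// Worked example of the bookkeeping above (values are illustrative): with a
// parent path of m/2/0 and a stored index of 3 under DERIV_PREFIX, next_child
// returns the identifier for m/2/0/3 and persists 4, so the next call yields
// m/2/0/4.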
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?; | fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
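// next_tx_log_id above is a read-then-increment over TX_LOG_ID_PREFIX: it
// returns the stored counter and persists counter + 1 in the same batch, so
// ids are handed out sequentially (0, 1, 2, ...) per parent key.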
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
} | Ok(())
}
| random_line_split |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? |
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
} | conditional_block |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> |
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
} | identifier_body |
lib.rs | let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>, | fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
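// Hypothetical usage sketch (program and register names are illustrative, not
// defined in this crate): a flow switches datapath programs, then adjusts a
// control register that program declared.
//
//     let sc = control.set_program("MyProgram", Some(&[("Cwnd", 14600)]))?;
//     control.update_field(&sc, &[("Cwnd", 29200)])?;
//
// Both calls serialize a message over IPC to the datapath; per the
// implementation below, only control registers (and implicit registers 4 and
// 5) may be updated.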
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if the program with this key exists, return it; otherwise return nothing
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
{
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
}
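// send_and_install is the install-time counterpart to set_program: it ships a
// freshly compiled Bin (its event and instruction lists) to the datapath,
// keyed by the Scope's program_uid so later changeprog and report messages
// can refer back to the installed program.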
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct DatapathInfo {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid != self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
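// Sketch of reading a report value (assumes a datapath program whose fold
// function defined a `Report.minrtt` field, as in the module-level example):
//
//     let minrtt = report.get_field("Report.minrtt", &scope)?;
//
// The program_uid check above guarantees a Report produced by one program is
// never misread through a Scope compiled for a different one.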
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
/// [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map where each key is a unique name identifying a
/// program, and each value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no_run
/// let args = CongAlgBuilder::args();
/// let matches = app.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(matches);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `Clap::App::get_matches_from` on the `clap::App` returned by the `register` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g., format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: join_handle.join() returns an Err instead of Ok, because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
/// logger and command line argument structure.
/// `run()` and `spawn()` create `Arc<AtomicBool>` objects,
/// which are passed into `run_inner` to build the backend, so `spawn()` can create a `CCPHandle`
/// that references this boolean to kill the thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned execution loop runs 'forever'; it only exits in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
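// Lifecycle sketch (exposition only; `builder` and `alg` stand in for values
// the caller already has):
//
//     let handle = spawn(builder, Config::default(), alg);
//     // ... the CCP loop now runs on its own thread ...
//     handle.kill(); // ask the loop to stop
//     handle.wait().expect("CCP loop exited with an error");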
// Main execution inner loop of ccp.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate callback on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c.src_ | ) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function. | random_line_split |
lib.rs | let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function.
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if the program with this key exists, return it; otherwise return an error
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
{
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
}
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct | {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid != self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
/// [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map where each key is a unique name identifying a
/// program, and each value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no_run
/// let args = CongAlgBuilder::args();
/// let matches = app.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(matches);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `Clap::App::get_matches_from` on the `clap::App` returned by the `register` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g., format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: join_handle.join() returns an Err instead of Ok, because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
/// logger and command line argument structure.
/// `run()` and `spawn()` create `Arc<AtomicBool>` objects,
/// which are passed into `run_inner` to build the backend, so `spawn()` can create a `CCPHandle`
/// that references this boolean to kill the thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned execution loop runs 'forever'; it only exits in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
// Main execution inner loop of ccp.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate callback on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c. | DatapathInfo | identifier_name |
lib.rs | let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function.
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if the program with this key exists, return it; otherwise return an error
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
|
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct DatapathInfo {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid!= self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
/// [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map where each key is a unique name identifying a
/// program, and each value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no_run
/// let args = CongAlgBuilder::args();
/// let matches = app.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(matches);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `Clap::App::get_matches_from` on the `clap::App` returned by the `register` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g., format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: join_handle.join() returns an Err instead of Ok, because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
/// logger and command line argument structure.
/// `run()` and `spawn()` create `Arc<AtomicBool>` objects,
/// which are passed into `run_inner` to build the backend, so `spawn()` can create a `CCPHandle`
/// that references this boolean to kill the thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned execution loop runs 'forever'; it only exits in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
// Main execution inner loop of ccp.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate callback on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c.src | {
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
} | identifier_body |
utils.rs | pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = 1 * (ONE_MB as u64);
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
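// Worked example of the chunk arithmetic above (exposition only): with
// expected_length = 2_621_440 bytes (2.5 MB) and DEFAULT_CHUNK_SIZE = 1 MB,
// num_chunks = (2_621_440 + 1_048_576 - 1) / 1_048_576 = 3, and the requested
// ranges are bytes=0-1048575, bytes=1048576-2097151, and bytes=2097152-2621439
// (the final chunk's end is clamped to expected_length - 1).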
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub fn vrs_to_rsv(vrs: &str) -> String {
    format!("{}{}{}", &vrs[2..66], &vrs[66..130], &vrs[..2])
}
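// Sketch test (added for exposition) pinning down the transformation: the
// 1-byte recovery id `v` at the front of the hex-encoded signature moves to
// the back, yielding r || s || v. The signature bytes are synthetic.
#[cfg(test)]
mod vrs_to_rsv_tests {
    use super::vrs_to_rsv;

    #[test]
    fn moves_recovery_id_to_the_end() {
        let v = "1b";
        let r = "a".repeat(64);
        let s = "b".repeat(64);
        assert_eq!(
            vrs_to_rsv(&format!("{}{}{}", v, r, s)),
            format!("{}{}{}", r, s, v)
        );
    }
}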
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"]
.to_str()?
.parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
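// Round-trip sketch (added for exposition): encrypting with a passphrase-based
// age encryptor and then decrypting with the same passphrase should recover
// the original bytes. The passphrase below is a synthetic example.
#[cfg(test)]
mod encrypt_decrypt_tests {
    use super::{decrypt, encrypt};
    use secrecy::SecretString;

    #[test]
    fn round_trips_secret_bytes() {
        let passphrase = SecretString::new("synthetic example passphrase".to_string());
        let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
        let encrypted = encrypt(encryptor, b"plumo secret").expect("encryption should succeed");
        let decrypted = decrypt(&passphrase, &encrypted).expect("decryption should succeed");
        assert_eq!(decrypted, b"plumo secret".to_vec());
    }
}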
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
let mut file = OpenOptions::new().write(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn backup_transcript(transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(" ").collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
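// Round-trip sketch (exposition only; the address and signature values are
// synthetic placeholders): extraction splits off the last two
// whitespace-separated tokens, so it inverts `format_attestation` even when
// the message itself contains spaces.
#[cfg(test)]
mod attestation_format_tests {
    use super::{extract_signature_from_attestation, format_attestation};

    #[test]
    fn round_trips_message_address_and_signature() {
        let formatted = format_attestation("my attestation message", "0xaddress", "0xsignature");
        let (message, address, signature) =
            extract_signature_from_attestation(&formatted).expect("should parse");
        assert_eq!(message, "my attestation message");
        assert_eq!(address, "0xaddress");
        assert_eq!(signature, "0xsignature");
    }
}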
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> |
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
}
| {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
} | identifier_body |
utils.rs | pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = 1 * (ONE_MB as u64);
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
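///Moves the leading recovery byte (the first two hex characters) of a
///130-character hex signature to the end, turning V‖R‖S ordering into
///R‖S‖V. Despite the parameter name, the input is interpreted as V‖R‖S.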
pub fn vrs_to_rsv(rsv: &str) -> String {
format!("{}{}{}", &rsv[2..66], &rsv[66..130], &rsv[..2])
}
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"] | .parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
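///Builds an `Authorization` header value of the form
///`Celo 0x<address>:0x<signature>`, where the signature is made over the
///lowercased `"<method> /<path>"` string.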
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
let mut file = OpenOptions::new().write(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
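///Retry policy for `futures_retry::FutureRetry`: waits five seconds between
///attempts and forwards the error once `max_attempts` has been reached.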
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn backup_transcript(transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(' ').collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
}
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
} | .to_str()? | random_line_split |
utils.rs | pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = ONE_MB as u64;
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub fn vrs_to_rsv(rsv: &str) -> String {
format!("{}{}{}", &rsv[2..66], &rsv[66..130], &rsv[..2])
}
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"]
.to_str()?
.parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
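///Decrypts a hex-encoded, passphrase-protected age ciphertext. Only the
///passphrase variant of `age::Decryptor` is supported.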
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
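///Encrypts `secret` with the given age encryptor and returns the ciphertext
///hex-encoded.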
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
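///Loads the Plumo setup keys from `keys_file`, prompts for the passphrase
///(on stdin or via an interactive prompt) and returns the decrypted seed,
///the decrypted private key and the attestation. On the first interactive
///run, extra entropy may be collected, mixed with OS randomness through
///Blake2s, then encrypted and persisted back into the key file.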
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
let mut file = OpenOptions::new().write(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
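///Loads the JSON transcript from the `transcript` file in the working
///directory, creating an empty transcript on disk first if none exists.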
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn | (transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(' ').collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
}
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
}
| backup_transcript | identifier_name |
context.rs | //!The [`Context`][context] contains all the input data for the request
//!handlers, as well as some utilities. This is where request data, like
//!headers, client address and the request body can be retrieved from and it
//!can safely be picked apart, since its ownership is transferred to the
//!handler.
//!
//!##Accessing Headers
//!
//!The headers are stored in the `headers` field. See the [`Headers`][headers]
//!struct for more information about how to access them.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::header::UserAgent;
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(&UserAgent(ref user_agent)) = context.headers.get() {
//! response.send(format!("got user agent string \"{}\"", user_agent));
//! } else {
//! response.send("no user agent string provided");
//! }
//!}
//!```
//!
//!##Path Variables
//!
//!A router may collect variable data from paths (for example `id` in
//!`/products/:id`). The values from these variables can be accessed through
//!the `variables` field.
//!
//!```
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(id) = context.variables.get("id") {
//! response.send(format!("asking for product with id \"{}\"", id));
//! } else {
//! //This will usually not happen, unless the handler is also
//! //assigned to a path without the `id` variable
//! response.send("no id provided");
//! }
//!}
//!```
//!
//!##Other URL Parts
//!
//! * Query variables (`http://example.com?a=b&c=d`) can be found in the
//!`query` field and are accessed in exactly the same fashion as path
//!variables.
//!
//! * The fragment (`http://example.com#foo`) is also parsed and can be
//!accessed through `fragment` as an optional `String`.
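//!
//!A minimal sketch of reading both (the query key `q` and the fallbacks are
//!made up for illustration):
//!
//!```
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//!    let q = context.query.get("q").map(|q| q.clone()).unwrap_or_default();
//!    let fragment = context.fragment.clone().unwrap_or_default();
//!    response.send(format!("query: \"{}\", fragment: \"{}\"", q, fragment));
//!}
//!```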
//!
//!##Logging
//!
//!Rustful has a built-in logging infrastructure and it is made available to
//!handlers through the `log` field. This provides logging and error reporting
//!in a unified and more controlled fashion than what panics and `println!`
//!give. See the [`log`][log] module for more information about the standard
//!alternatives.
//!
//!```
//!# fn something_that_may_fail() -> Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, which can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]: ../header/struct.Headers.html
//![log]: ../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct BodyReader<'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
let boundary = match headers.get() {
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, ref attrs))) => {
attrs.iter()
.find(|&&(ref attr, _)| attr == &Attr::Boundary)
.and_then(|&(_, ref val)| if let Value::Ext(ref boundary) = *val {
Some(boundary.clone())
} else {
None
})
},
_ => None
};
BodyReader {
reader: reader,
multipart_boundary: boundary
}
}
}
#[cfg(not(feature = "multipart"))]
impl<'a, 'b> BodyReader<'a, 'b> {
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, _headers: &Headers) -> BodyReader<'a, 'b> {
BodyReader {
reader: reader
}
}
}
///`BodyReader` extension for reading and parsing a query string.
///
///Examples and more information can be found in [the documentation for
///`BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
pub trait ExtQueryBody {
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>>;
}
impl<'a, 'b> ExtQueryBody for BodyReader<'a, 'b> {
///Read and parse the request body as a query string. The body will be
///decoded as UTF-8 and plain '+' characters will be replaced with spaces.
///
///A simplified example of how to parse `a=number&b=number`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtQueryBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as a query string
/// let query = context.body.read_query_body().unwrap();
///
/// //Find "a" and "b" and assume that they are numbers
/// let a: f64 = query.get("a").and_then(|number| number.parse().ok()).unwrap();
/// let b: f64 = query.get("b").and_then(|number| number.parse().ok()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
#[inline]
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>> {
let mut buf = Vec::new();
try!(self.read_to_end(&mut buf));
Ok(utils::parse_parameters(&buf))
}
}
///`BodyReader` extension for reading and parsing a JSON body.
///
///It is available by default and can be toggled using the `rustc_json_body`
///feature. Examples and more information can be found in [the documentation
///for `BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
#[cfg(feature = "rustc_json_body")]
pub trait ExtJsonBody {
///Read the request body into a JSON structure.
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError>;
///Parse and decode the request body as some type `T`.
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T>;
}
#[cfg(feature = "rustc_json_body")]
impl<'a, 'b> ExtJsonBody for BodyReader<'a, 'b> {
///Read the request body into a generic JSON structure. This structure can
///then be navigated and parsed freely.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as JSON
/// let json = context.body.read_json_body().unwrap();
///
/// //Find "a" and "b" in the root object and assume that they are numbers
/// let a = json.find("a").and_then(|number| number.as_f64()).unwrap();
/// let b = json.find("b").and_then(|number| number.as_f64()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError> {
json::Json::from_reader(self)
}
///Read and decode a request body as a type `T`. The target type must
///implement `rustc_serialize::Decodable`.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///extern crate rustful;
///extern crate rustc_serialize;
///
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///#[derive(RustcDecodable)]
///struct Foo {
/// a: f64,
/// b: f64
///}
///
///fn my_handler(mut context: Context, response: Response) {
/// //Decode a JSON formatted request body into Foo
/// let foo: Foo = context.body.decode_json_body().unwrap();
///
/// response.send(format!("{} + {} = {}", foo.a, foo.b, foo.a + foo.b));
///}
///# fn main() {}
///```
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T> {
let mut buf = String::new();
try!(self.read_to_string(&mut buf).map_err(|e| {
let parse_err = json::ParserError::IoError(e);
json::DecoderError::ParseError(parse_err)
}));
json::decode(&buf)
}
}
impl<'a, 'b> Read for BodyReader<'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///A specialized request representation for the multipart interface.
#[cfg(feature = "multipart")]
pub struct MultipartRequest<'r, 'a: 'r, 'b: 'a> {
boundary: &'r str,
reader: &'r mut HttpReader<&'a mut BufReader<&'b mut NetworkStream>>
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> HttpRequest for MultipartRequest<'r, 'a, 'b> {
fn multipart_boundary(&self) -> Option<&str> {
Some(self.boundary)
}
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> Read for MultipartRequest<'r, 'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///Hypermedia connected to an API endpoint.
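///
///A hypothetical sketch of advertising a `GET /products/:id` endpoint; the
///field values are illustrative and not taken from a real router setup:
///
///```
///use rustful::context::{Hypermedia, Link, LinkSegment};
///use rustful::Method;
///
///let mut hypermedia = Hypermedia::new();
///hypermedia.links.push(Link {
///    method: Some(Method::Get),
///    path: vec![
///        LinkSegment::Static("products"),
///        LinkSegment::Variable("id")
///    ]
///});
///```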
pub struct Hypermedia<'a> {
///Forward links from the current endpoint to other endpoints. These may
///include endpoints with the same path, but different method.
pub links: Vec<Link<'a>>
}
impl<'a> Hypermedia<'a> {
///Create an empty `Hypermedia` structure.
pub fn new() -> Hypermedia<'a> {
Hypermedia {
links: vec![]
}
}
}
///A hyperlink.
#[derive(PartialEq, Eq, Debug)]
pub struct Link<'a> {
///The HTTP method for which an endpoint is available. It can be left
///unspecified if the method doesn't matter.
pub method: Option<Method>,
///A relative path from the current location.
pub path: Vec<LinkSegment<'a>>
}
///A segment of a hyperlink path.
#[derive(PartialEq, Eq, Debug)]
pub enum LinkSegment<'a> {
///A static part of a path.
Static(&'a str),
///A dynamic part of a path. Can be substituted with anything.
Variable(&'a str),
///A recursive wildcard. Will recursively match anything.
RecursiveWildcard
} | //!Handler context and request body reading extensions.
//!
//!#Context
//! | random_line_split |
|
context.rs | //!Handler context and request body reading extensions.
//!
//!#Context
//!
//!The [`Context`][context] contains all the input data for the request
//!handlers, as well as some utilities. This is where request data, like
//!headers, client address and the request body can be retrieved from and it
//!can safely be picked apart, since its ownership is transferred to the
//!handler.
//!
//!##Accessing Headers
//!
//!The headers are stored in the `headers` field. See the [`Headers`][headers]
//!struct for more information about how to access them.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::header::UserAgent;
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(&UserAgent(ref user_agent)) = context.headers.get() {
//! response.send(format!("got user agent string \"{}\"", user_agent));
//! } else {
//! response.send("no user agent string provided");
//! }
//!}
//!```
//!
//!##Path Variables
//!
//!A router may collect variable data from paths (for example `id` in
//!`/products/:id`). The values from these variables can be accessed through
//!the `variables` field.
//!
//!```
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(id) = context.variables.get("id") {
//! response.send(format!("asking for product with id \"{}\"", id));
//! } else {
//! //This will usually not happen, unless the handler is also
//! //assigned to a path without the `id` variable
//! response.send("no id provided");
//! }
//!}
//!```
//!
//!##Other URL Parts
//!
//! * Query variables (`http://example.com?a=b&c=d`) can be found in the
//!`query` field and are accessed in exactly the same fashion as path
//!variables.
//!
//! * The fragment (`http://example.com#foo`) is also parsed and can be
//!accessed through `fragment` as an optional `String`.
//!
//!##Logging
//!
//!Rustful has a built-in logging infrastructure and it is made available to
//!handlers through the `log` field. This provides logging and error reporting
//!in a unified and more controlled fashion than what panics and `println!`
//!give. See the [`log`][log] module for more information about the standard
//!alternatives.
//!
//!```
//!# fn something_that_may_fail() -> Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, which can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]: ../header/struct.Headers.html
//![log]: ../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct BodyReader<'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
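// Only `multipart/form-data` requests carry a usable boundary; dig the
// `boundary` parameter out of the parsed `Content-Type` attributes, since
// `as_multipart` will need it to parse the body later.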
let boundary = match headers.get() {
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, ref attrs))) => {
attrs.iter()
.find(|&&(ref attr, _)| attr == &Attr::Boundary)
.and_then(|&(_, ref val)| if let Value::Ext(ref boundary) = *val | else {
None
})
},
_ => None
};
BodyReader {
reader: reader,
multipart_boundary: boundary
}
}
}
#[cfg(not(feature = "multipart"))]
impl<'a, 'b> BodyReader<'a, 'b> {
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, _headers: &Headers) -> BodyReader<'a, 'b> {
BodyReader {
reader: reader
}
}
}
///`BodyReader` extension for reading and parsing a query string.
///
///Examples and more information can be found in [the documentation for
///`BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
pub trait ExtQueryBody {
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>>;
}
impl<'a, 'b> ExtQueryBody for BodyReader<'a, 'b> {
///Read and parse the request body as a query string. The body will be
///decoded as UTF-8 and plain '+' characters will be replaced with spaces.
///
///A simplified example of how to parse `a=number&b=number`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtQueryBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as a query string
/// let query = context.body.read_query_body().unwrap();
///
/// //Find "a" and "b" and assume that they are numbers
/// let a: f64 = query.get("a").and_then(|number| number.parse().ok()).unwrap();
/// let b: f64 = query.get("b").and_then(|number| number.parse().ok()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
#[inline]
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>> {
let mut buf = Vec::new();
try!(self.read_to_end(&mut buf));
Ok(utils::parse_parameters(&buf))
}
}
///`BodyReader` extension for reading and parsing a JSON body.
///
///It is available by default and can be toggled using the `rustc_json_body`
///feature. Examples and more information can be found in [the documentation
///for `BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
#[cfg(feature = "rustc_json_body")]
pub trait ExtJsonBody {
///Read the request body into a JSON structure.
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError>;
///Parse and decode the request body as some type `T`.
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T>;
}
#[cfg(feature = "rustc_json_body")]
impl<'a, 'b> ExtJsonBody for BodyReader<'a, 'b> {
///Read the request body into a generic JSON structure. This structure can
///then be navigated and parsed freely.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as JSON
/// let json = context.body.read_json_body().unwrap();
///
/// //Find "a" and "b" in the root object and assume that they are numbers
/// let a = json.find("a").and_then(|number| number.as_f64()).unwrap();
/// let b = json.find("b").and_then(|number| number.as_f64()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError> {
json::Json::from_reader(self)
}
///Read and decode a request body as a type `T`. The target type must
///implement `rustc_serialize::Decodable`.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///extern crate rustful;
///extern crate rustc_serialize;
///
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///#[derive(RustcDecodable)]
///struct Foo {
/// a: f64,
/// b: f64
///}
///
///fn my_handler(mut context: Context, response: Response) {
/// //Decode a JSON formatted request body into Foo
/// let foo: Foo = context.body.decode_json_body().unwrap();
///
/// response.send(format!("{} + {} = {}", foo.a, foo.b, foo.a + foo.b));
///}
///# fn main() {}
///```
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T> {
let mut buf = String::new();
try!(self.read_to_string(&mut buf).map_err(|e| {
let parse_err = json::ParserError::IoError(e);
json::DecoderError::ParseError(parse_err)
}));
json::decode(&buf)
}
}
impl<'a, 'b> Read for BodyReader<'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///A specialized request representation for the multipart interface.
#[cfg(feature = "multipart")]
pub struct MultipartRequest<'r, 'a: 'r, 'b: 'a> {
boundary: &'r str,
reader: &'r mut HttpReader<&'a mut BufReader<&'b mut NetworkStream>>
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> HttpRequest for MultipartRequest<'r, 'a, 'b> {
fn multipart_boundary(&self) -> Option<&str> {
Some(self.boundary)
}
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> Read for MultipartRequest<'r, 'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///Hypermedia connected to an API endpoint.
pub struct Hypermedia<'a> {
///Forward links from the current endpoint to other endpoints. These may
///include endpoints with the same path, but different method.
pub links: Vec<Link<'a>>
}
impl<'a> Hypermedia<'a> {
///Create an empty `Hypermedia` structure.
pub fn new() -> Hypermedia<'a> {
Hypermedia {
links: vec![]
}
}
}
///A hyperlink.
#[derive(PartialEq, Eq, Debug)]
pub struct Link<'a> {
///The HTTP method for which an endpoint is available. It can be left
///unspecified if the method doesn't matter.
pub method: Option<Method>,
///A relative path from the current location.
pub path: Vec<LinkSegment<'a>>
}
///A segment of a hyperlink path.
#[derive(PartialEq, Eq, Debug)]
pub enum LinkSegment<'a> {
///A static part of a path.
Static(&'a str),
///A dynamic part of a path. Can be substituted with anything.
Variable(&'a str),
///A recursive wildcard. Will recursively match anything.
RecursiveWildcard
}
| {
Some(boundary.clone())
} | conditional_block |
context.rs
//!Handler context and request body reading extensions.
//!
//!#Context
//!
//!The [`Context`][context] contains all the input data for the request
//!handlers, as well as some utilities. This is where request data, like
//!headers, client address and the request body can be retrieved from and it
//!can safely be picked apart, since its ownership is transferred to the
//!handler.
//!
//!##Accessing Headers
//!
//!The headers are stored in the `headers` field. See the [`Headers`][headers]
//!struct for more information about how to access them.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::header::UserAgent;
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(&UserAgent(ref user_agent)) = context.headers.get() {
//! response.send(format!("got user agent string \"{}\"", user_agent));
//! } else {
//! response.send("no user agent string provided");
//! }
//!}
//!```
//!
//!##Path Variables
//!
//!A router may collect variable data from paths (for example `id` in
//!`/products/:id`). The values from these variables can be accessed through
//!the `variables` field.
//!
//!```
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(id) = context.variables.get("id") {
//! response.send(format!("asking for product with id \"{}\"", id));
//! } else {
//! //This will usually not happen, unless the handler is also
//! //assigned to a path without the `id` variable
//! response.send("no id provided");
//! }
//!}
//!```
//!
//!##Other URL Parts
//!
//! * Query variables (`http://example.com?a=b&c=d`) can be found in the
//!`query` field and they are accessed in exactly the same fashion as path
//!variables are used.
//!
//! * The fragment (`http://example.com#foo`) is also parsed and can be
//!accessed through `fragment` as an optional `String`.
//!
//!##Logging
//!
//!Rustful has a built in logging infrastructure and it is made available to
//!handlers through the `log` field. This provides logging and error reporting
//!in a unified and more controlled fashion than what panics and `println!`
//!gives. See the [`log`][log] module for more information about the standard
//!alternatives.
//!
//!```
//!# fn something_that_may_fail() -> Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, that can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]:../header/struct.Headers.html
//![log]:../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct BodyReader<'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
let boundary = match headers.get() {
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, ref attrs))) => {
attrs.iter()
.find(|&&(ref attr, _)| attr == &Attr::Boundary)
.and_then(|&(_, ref val)| if let Value::Ext(ref boundary) = *val {
Some(boundary.clone())
} else {
None
})
},
_ => None
};
BodyReader {
reader: reader,
multipart_boundary: boundary
}
}
}
#[cfg(not(feature = "multipart"))]
impl<'a, 'b> BodyReader<'a, 'b> {
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, _headers: &Headers) -> BodyReader<'a, 'b> {
BodyReader {
reader: reader
}
}
}
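// Note that both `from_reader` constructors above share the same signature,
// so calling code compiles unchanged whether or not the `multipart` feature
// is enabled.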
///`BodyReader` extension for reading and parsing a query string.
///
///Examples and more information can be found in [the documentation for
///`BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
pub trait ExtQueryBody {
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>>;
}
impl<'a, 'b> ExtQueryBody for BodyReader<'a, 'b> {
///Read and parse the request body as a query string. The body will be
///decoded as UTF-8 and plain '+' characters will be replaced with spaces.
///
///A simplified example of how to parse `a=number&b=number`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtQueryBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as a query string
/// let query = context.body.read_query_body().unwrap();
///
/// //Find "a" and "b" and assume that they are numbers
/// let a: f64 = query.get("a").and_then(|number| number.parse().ok()).unwrap();
/// let b: f64 = query.get("b").and_then(|number| number.parse().ok()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
#[inline]
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>> {
let mut buf = Vec::new();
try!(self.read_to_end(&mut buf));
Ok(utils::parse_parameters(&buf))
}
}
///`BodyReader` extension for reading and parsing a JSON body.
///
///It is available by default and can be toggled using the `rustc_json_body`
///feature. Examples and more information can be found in [the documentation
///for `BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
#[cfg(feature = "rustc_json_body")]
pub trait ExtJsonBody {
///Read the request body into a JSON structure.
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError>;
///Parse and decode the request body as some type `T`.
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T>;
}
#[cfg(feature = "rustc_json_body")]
impl<'a, 'b> ExtJsonBody for BodyReader<'a, 'b> {
///Read the request body into a generic JSON structure. This structure can
///then be navigated and parsed freely.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as JSON
/// let json = context.body.read_json_body().unwrap();
///
/// //Find "a" and "b" in the root object and assume that they are numbers
/// let a = json.find("a").and_then(|number| number.as_f64()).unwrap();
/// let b = json.find("b").and_then(|number| number.as_f64()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
fn read_json_body(&mut self) -> Result<json::Json, json::BuilderError> {
json::Json::from_reader(self)
}
///Read and decode a request body as a type `T`. The target type must
///implement `rustc_serialize::Decodable`.
///
///A simplified example of how to parse `{ "a": number, "b": number }`:
///
///```
///extern crate rustful;
///extern crate rustc_serialize;
///
///use rustful::{Context, Response};
///use rustful::context::ExtJsonBody;
///
///#[derive(RustcDecodable)]
///struct Foo {
/// a: f64,
/// b: f64
///}
///
///fn my_handler(mut context: Context, response: Response) {
/// //Decode a JSON formatted request body into Foo
/// let foo: Foo = context.body.decode_json_body().unwrap();
///
/// response.send(format!("{} + {} = {}", foo.a, foo.b, foo.a + foo.b));
///}
///# fn main() {}
///```
fn decode_json_body<T: Decodable>(&mut self) -> json::DecodeResult<T> {
let mut buf = String::new();
try!(self.read_to_string(&mut buf).map_err(|e| {
let parse_err = json::ParserError::IoError(e);
json::DecoderError::ParseError(parse_err)
}));
json::decode(&buf)
}
}
impl<'a, 'b> Read for BodyReader<'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///A specialized request representation for the multipart interface.
#[cfg(feature = "multipart")]
pub struct MultipartRequest<'r, 'a: 'r, 'b: 'a> {
boundary: &'r str,
reader: &'r mut HttpReader<&'a mut BufReader<&'b mut NetworkStream>>
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> HttpRequest for MultipartRequest<'r, 'a, 'b> {
fn multipart_boundary(&self) -> Option<&str> {
Some(self.boundary)
}
}
#[cfg(feature = "multipart")]
impl<'r, 'a, 'b> Read for MultipartRequest<'r, 'a, 'b> {
///Read the request body.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
///Hypermedia connected to an API endpoint.
pub struct Hypermedia<'a> {
///Forward links from the current endpoint to other endpoints. These may
///include endpoints with the same path, but different method.
pub links: Vec<Link<'a>>
}
impl<'a> Hypermedia<'a> {
///Create an empty `Hypermedia` structure.
pub fn new() -> Hypermedia<'a> {
Hypermedia {
links: vec![]
}
}
}
///A hyperlink.
#[derive(PartialEq, Eq, Debug)]
pub struct Link<'a> {
///The HTTP method for which an endpoint is available. It can be left
///unspecified if the method doesn't matter.
pub method: Option<Method>,
///A relative path from the current location.
pub path: Vec<LinkSegment<'a>>
}
///A segment of a hyperlink path.
#[derive(PartialEq, Eq, Debug)]
pub enum LinkSegment<'a> {
///A static part of a path.
Static(&'a str),
///A dynamic part of a path. Can be substituted with anything.
Variable(&'a str),
///A recursive wildcard. Will recursively match anything.
RecursiveWildcard
}
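// A minimal sketch (not part of the original crate) of how `Hypermedia`,
// `Link` and `LinkSegment` fit together; the endpoint path used here is
// purely hypothetical.
#[cfg(test)]
mod hypermedia_example {
    use super::{Hypermedia, Link, LinkSegment};

    #[test]
    fn builds_a_forward_link() {
        let mut hypermedia = Hypermedia::new();
        hypermedia.links.push(Link {
            //`None` means the link applies to any HTTP method.
            method: None,
            //Corresponds to a path like `/products/:id/related`.
            path: vec![
                LinkSegment::Static("products"),
                LinkSegment::Variable("id"),
                LinkSegment::Static("related"),
            ],
        });
        assert_eq!(hypermedia.links.len(), 1);
        assert_eq!(hypermedia.links[0].path.len(), 3);
    }
}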
local_cache.rs
// src/io/local_cache.rs -- a local cache of files obtained from another IoProvider
// Copyright 2017-2018 the Tectonic Project
// Licensed under the MIT License.
use fs2::FileExt;
use tempfile;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::{BufRead, BufReader, Read, Write};
use std::io::ErrorKind as IoErrorKind;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use digest::{self, Digest, DigestData};
use errors::{ErrorKind, Result};
use super::{try_open_file, Bundle, InputHandle, InputOrigin, IoProvider, OpenResult};
use status::StatusBackend;
struct LocalCacheItem {
_length: u64,
digest: Option<DigestData>, // None => negative cache: this file is not in the bundle
}
pub struct LocalCache<B: Bundle> {
backend: B,
digest_path: PathBuf,
cached_digest: DigestData,
checked_digest: bool,
manifest_path: PathBuf,
data_path: PathBuf,
contents: HashMap<OsString, LocalCacheItem>,
only_cached: bool,
}
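// On disk the cache therefore consists of three pieces: the digest file
// naming the backend bundle, a per-bundle manifest (`<digest>.txt`, see
// `new` below) listing known files, and the actual file data stored at
// digest-derived paths under `data_path`.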
impl<B: Bundle> LocalCache<B> {
pub fn new(
mut backend: B, digest: &Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
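// Each manifest line has the form "<file name> <length> <digest>". It is
// split from the right with `rsplitn(3, ' ')` below so that file names
// containing spaces survive intact, and a digest of "-" records a
// negative cache entry (the file is known to be absent from the bundle).
// Illustrative (not real) entries:
//
//     cmr10.tfm 1296 d0a3...41c7
//     missing.sty 0 -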
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn record_cache_result(&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name(name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) {
    return OpenResult::Err(e.into());
}
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk,
// computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into()),
};
let mut buf = [0u8; 8192];
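// Stream the body in 8 KiB chunks, feeding the SHA256 digest as we go.
// Note that the `while let Ok(...)` pattern below ends the loop silently
// on a read error instead of propagating it, so a truncated download
// would still be hashed, persisted and recorded.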
while let Ok(nbytes) = stream.read(&mut buf) {
if nbytes == 0 {
break;
}
length += nbytes;
let chunk = &buf[..nbytes];
digest_builder.input(chunk);
if let Err(e) = temp_dest.write_all(chunk) {
return OpenResult::Err(e.into());
}
}
let digest = DigestData::from(digest_builder);
// Now we can almost move it to its final destination.
let final_path = match digest.create_two_part_path(&self.data_path) {
Ok(p) => p,
Err(e) => return OpenResult::Err(e.into()),
};
// Perform a racy check for the destination existing, because this
// matters on Windows: if the destination is already there, we'll get
// an error because the destination is marked read-only. Assuming
// non-pathological filesystem manipulation, though, we'll only be
// subject to the race once.
if !final_path.exists() {
if let Err(e) = temp_dest.persist(&final_path) {
return OpenResult::Err(e.error.into());
}
// Now we can make the file readonly. It would be nice to set the
// permissions using the already-open file handle owned by the
// tempfile, but mkstemp doesn't give us access.
let mut perms = match fs::metadata(&final_path) {
Ok(p) => p,
Err(e) => {
return OpenResult::Err(e.into());
}
}.permissions();
perms.set_readonly(true);
if let Err(e) = fs::set_permissions(&final_path, perms) {
return OpenResult::Err(e.into());
}
}
// And finally add a record of this file to our manifest. Note that
// we're opening and closing this file every time we load a new file;
// not so efficient, but whatever.
if let Err(e) = self.record_cache_result(name, length as u64, Some(digest)) {
return OpenResult::Err(e.into());
}
OpenResult::Ok(final_path)
}
}
impl<B: Bundle> IoProvider for LocalCache<B> {
fn input_open_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<InputHandle> {
let path = match self.path_for_name(name, status) {
OpenResult::Ok(p) => p,
OpenResult::NotAvailable => return OpenResult::NotAvailable,
OpenResult::Err(e) => return OpenResult::Err(e),
};
let f = match File::open(&path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into())
};
OpenResult::Ok(InputHandle::new(name, BufReader::new(f), InputOrigin::Other))
}
}
impl<B: Bundle> Bundle for LocalCache<B> {
fn get_digest(&mut self, _status: &mut StatusBackend) -> Result<DigestData> {
Ok(self.cached_digest)
}
}
local_cache.rs
// src/io/local_cache.rs -- a local cache of files obtained from another IoProvider
// Copyright 2017-2018 the Tectonic Project
// Licensed under the MIT License.
use fs2::FileExt;
use tempfile;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::{BufRead, BufReader, Read, Write};
use std::io::ErrorKind as IoErrorKind;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use digest::{self, Digest, DigestData};
use errors::{ErrorKind, Result};
use super::{try_open_file, Bundle, InputHandle, InputOrigin, IoProvider, OpenResult};
use status::StatusBackend;
struct LocalCacheItem {
_length: u64,
digest: Option<DigestData>, // None => negative cache: this file is not in the bundle
}
pub struct LocalCache<B: Bundle> {
backend: B,
digest_path: PathBuf,
cached_digest: DigestData,
checked_digest: bool,
manifest_path: PathBuf,
data_path: PathBuf,
contents: HashMap<OsString, LocalCacheItem>,
only_cached: bool,
}
impl<B: Bundle> LocalCache<B> {
pub fn new(
mut backend: B, digest: &Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn record_cache_result(&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name(name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) {
return OpenResult::Err(e.into());
}
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk,
// computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into()),
};
let mut buf = [0u8; 8192];
while let Ok(nbytes) = stream.read(&mut buf) {
if nbytes == 0 {
break;
}
length += nbytes;
let chunk = &buf[..nbytes];
digest_builder.input(chunk);
if let Err(e) = temp_dest.write_all(chunk) {
return OpenResult::Err(e.into());
}
}
let digest = DigestData::from(digest_builder);
// Now we can almost move it to its final destination.
let final_path = match digest.create_two_part_path(&self.data_path) {
Ok(p) => p,
Err(e) => return OpenResult::Err(e.into()),
};
// Perform a racy check for the destination existing, because this
// matters on Windows: if the destination is already there, we'll get
// an error because the destination is marked read-only. Assuming
// non-pathological filesystem manipulation, though, we'll only be
// subject to the race once.
if !final_path.exists() {
if let Err(e) = temp_dest.persist(&final_path) {
return OpenResult::Err(e.error.into());
}
// Now we can make the file readonly. It would be nice to set the
// permissions using the already-open file handle owned by the
// tempfile, but mkstemp doesn't give us access.
let mut perms = match fs::metadata(&final_path) {
Ok(p) => p,
Err(e) => {
return OpenResult::Err(e.into());
}
}.permissions();
perms.set_readonly(true);
if let Err(e) = fs::set_permissions(&final_path, perms) {
return OpenResult::Err(e.into());
}
}
// And finally add a record of this file to our manifest. Note that
// we're opening and closing this file every time we load a new file;
// not so efficient, but whatever.
if let Err(e) = self.record_cache_result(name, length as u64, Some(digest)) {
return OpenResult::Err(e.into());
}
OpenResult::Ok(final_path)
}
}
impl<B: Bundle> IoProvider for LocalCache<B> {
fn input_open_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<InputHandle> {
let path = match self.path_for_name(name, status) {
OpenResult::Ok(p) => p,
OpenResult::NotAvailable => return OpenResult::NotAvailable,
OpenResult::Err(e) => return OpenResult::Err(e),
};
let f = match File::open(&path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into())
};
OpenResult::Ok(InputHandle::new(name, BufReader::new(f), InputOrigin::Other))
}
}
impl<B: Bundle> Bundle for LocalCache<B> {
fn get_digest(&mut self, _status: &mut StatusBackend) -> Result<DigestData> {
Ok(self.cached_digest)
}
}
local_cache.rs
// src/io/local_cache.rs -- a local cache of files obtained from another IoProvider
// Copyright 2017-2018 the Tectonic Project
// Licensed under the MIT License.
use fs2::FileExt;
use tempfile;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::{BufRead, BufReader, Read, Write};
use std::io::ErrorKind as IoErrorKind;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use digest::{self, Digest, DigestData};
use errors::{ErrorKind, Result};
use super::{try_open_file, Bundle, InputHandle, InputOrigin, IoProvider, OpenResult};
use status::StatusBackend;
struct LocalCacheItem {
_length: u64,
digest: Option<DigestData>, // None => negative cache: this file is not in the bundle
}
pub struct LocalCache<B: Bundle> {
backend: B,
digest_path: PathBuf,
cached_digest: DigestData,
checked_digest: bool,
manifest_path: PathBuf,
data_path: PathBuf,
contents: HashMap<OsString, LocalCacheItem>,
only_cached: bool,
}
impl<B: Bundle> LocalCache<B> {
pub fn new(
mut backend: B, digest: &Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn record_cache_result(&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name(name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) {
return OpenResult::Err(e.into());
}
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk,
// computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into()),
};
let mut buf = [0u8; 8192];
while let Ok(nbytes) = stream.read(&mut buf) {
if nbytes == 0 {
break;
}
length += nbytes;
let chunk = &buf[..nbytes];
digest_builder.input(chunk);
if let Err(e) = temp_dest.write_all(chunk) {
return OpenResult::Err(e.into());
}
}
let digest = DigestData::from(digest_builder);
// Now we can almost move it to its final destination.
let final_path = match digest.create_two_part_path(&self.data_path) {
Ok(p) => p,
Err(e) => return OpenResult::Err(e.into()),
};
// Perform a racy check for the destination existing, because this
// matters on Windows: if the destination is already there, we'll get
// an error because the destination is marked read-only. Assuming
// non-pathological filesystem manipulation, though, we'll only be
// subject to the race once.
if !final_path.exists() {
if let Err(e) = temp_dest.persist(&final_path) {
return OpenResult::Err(e.error.into());
}
// Now we can make the file readonly. It would be nice to set the
// permissions using the already-open file handle owned by the
// tempfile, but mkstemp doesn't give us access.
let mut perms = match fs::metadata(&final_path) {
Ok(p) => p,
Err(e) => {
return OpenResult::Err(e.into());
}
}.permissions();
perms.set_readonly(true);
if let Err(e) = fs::set_permissions(&final_path, perms) {
return OpenResult::Err(e.into());
}
}
// And finally add a record of this file to our manifest. Note that
// we're opening and closing this file every time we load a new file;
// not so efficient, but whatever.
if let Err(e) = self.record_cache_result(name, length as u64, Some(digest)) {
return OpenResult::Err(e.into());
}
OpenResult::Ok(final_path)
}
}
impl<B: Bundle> IoProvider for LocalCache<B> {
fn input_open_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<InputHandle> {
let path = match self.path_for_name(name, status) {
OpenResult::Ok(p) => p,
OpenResult::NotAvailable => return OpenResult::NotAvailable,
OpenResult::Err(e) => return OpenResult::Err(e),
};
let f = match File::open(&path) {
Ok(f) => f,
Err(e) => return OpenResult::Err(e.into())
};
OpenResult::Ok(InputHandle::new(name, BufReader::new(f), InputOrigin::Other))
}
}
impl<B: Bundle> Bundle for LocalCache<B> {
fn get_digest(&mut self, _status: &mut StatusBackend) -> Result<DigestData> {
Ok(self.cached_digest)
}
}
mod.rs
/*
* Copyright (c) 2017-2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
macro_rules! get_document {
($_self:ident) => {{
let document = $_self.model.page.dom_document();
if let Some(document) = document {
document
}
else {
return;
}
}};
}
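// Typical usage in the handler methods below:
//
//     let document = get_document!(self);
//
// This returns early from the enclosing method when the page has no DOM
// document.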
mod marks;
mod scroll;
use std::collections::HashMap;
use std::f32;
use std::sync::Mutex;
use gio::Cancellable;
use glib::{Cast, Closure, ObjectExt, ToVariant};
use regex::Regex;
use relm::{Relm, Update, UpdateNew};
use webkit2gtk_webextension::{
traits::{
DOMDocumentExt,
DOMDOMSelectionExt,
DOMDOMWindowExt,
DOMElementExt,
DOMEventTargetExt,
DOMHTMLElementExt,
DOMHTMLInputElementExt,
DOMNodeExt,
WebPageExt,
},
DOMElement,
DOMHTMLElement,
DOMHTMLInputElement,
DOMHTMLSelectElement,
DOMHTMLTextAreaElement,
UserMessage,
WebPage,
};
use titanium_common::{FollowMode, InnerMessage, protocol::encode};
use titanium_common::Action::{
self,
CopyLink,
DownloadLink,
FileInput,
GoInInsertMode,
NoAction,
};
use titanium_common::InnerMessage::*;
use dom::{
get_body,
get_elements_by_tag_name_in_all_frames,
get_hints_container,
get_href,
get_position,
is_enabled,
is_hidden,
is_text_input,
mouse_down,
click,
mouse_out,
mouse_over,
match_pattern,
};
use hints::{create_hints, hide_unrelevant_hints, show_all_hints, HINTS_ID};
use login_form::{get_credentials, load_password, load_username, submit_login_form};
use self::Msg::*;
pub struct Executor {
model: Model,
}
pub struct Model {
activated_file_input: Option<DOMHTMLInputElement>,
hint_keys: String,
hint_map: HashMap<String, DOMElement>,
last_hovered_element: Option<DOMElement>,
marks: HashMap<u8, u32>, // Byte to percent.
page: WebPage,
relm: Relm<Executor>,
scroll_element: Option<DOMElement>,
}
#[derive(Msg)]
pub enum Msg {
DocumentLoaded,
MessageRecv(InnerMessage),
Scroll,
}
impl Update for Executor {
type Model = Model;
type ModelParam = WebPage;
type Msg = Msg;
fn model(relm: &Relm<Self>, page: WebPage) -> Model {
Model {
activated_file_input: None,
hint_keys: String::new(),
hint_map: HashMap::new(),
last_hovered_element: None,
marks: HashMap::new(),
page,
relm: relm.clone(),
scroll_element: None,
}
}
fn update(&mut self, message: Msg) {
match message {
DocumentLoaded => {
self.init_scroll_element();
self.send_scroll_percentage();
let stream = self.model.relm.stream().clone();
let stream = Mutex::new(::send_cell::SendCell::new(stream));
let handler = Closure::new(move |_| {
let stream = stream.lock().unwrap();
stream.get().emit(Scroll);
None
});
if self.model.scroll_element == get_body(&self.model.page).map(|el| el.upcast()) {
let document = wtry_opt_no_ret!(self.model.page.dom_document());
document.add_event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the unrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve less message if we remove the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
    && tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
// Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
// FIXME: this is not working.
input_file.set_value(file);
}
}
fn send(& | , message: InnerMessage) {
let bytes =
match encode(message) {
Ok(message) => message,
Err(error) => {
error!("{}", error);
return;
},
};
let message = UserMessage::new("", Some(&bytes.to_variant()));
self.model.page.send_message_to_view(&message, None::<&Cancellable>, |_| {});
}
// Get the username and password from the login form.
fn send_credentials(&mut self) {
let mut username = String::new();
let mut password = String::new();
let credential =
self.model.page.dom_document()
.and_then(|document| get_credentials(&document));
if let Some(credential) = credential {
username = credential.username;
password = credential.password;
}
// TODO: Send None instead of empty strings.
self.send(Credentials(username, password));
}
// Get the page scroll percentage.
fn send_scroll_percentage(&mut self) {
let percentage = self.scroll_percentage();
self.send(ScrollPercentage(percentage));
}
// Show hints for the elements using the hint characters.
// TODO: only send the hint characters once, not every time?
fn show_hints(&mut self, hint_chars: &str) {
self.model.hint_keys.clear();
let container = wtry_opt_no_ret!(get_hints_container(&self.model.page));
let document = wtry_opt_no_ret!(self.model.page.dom_document());
let (hints, hint_map) = wtry_opt_no_ret!(create_hints(&document, hint_chars));
self.model.hint_map = hint_map;
check_err!(container.append_child(&hints));
}
// Submit the login form.
fn submit_login_form(&self) {
let document = get_document!(self);
submit_login_form(&document);
}
}
| self | identifier_name |
mod.rs | /*
* Copyright (c) 2017-2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
macro_rules! get_document {
($_self:ident) => {{
let document = $_self.model.page.dom_document();
if let Some(document) = document {
document
}
else {
return;
}
}};
}
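// Usage sketch (illustrative only): the macro expands to an early
// return, so it is only usable inside methods on `self` that return
// `()`:
//
//     fn example(&self) {
//         let document = get_document!(self); // returns if the page has no document
//         // ... use `document` ...
//     }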
mod marks;
mod scroll;
use std::collections::HashMap;
use std::f32;
use std::sync::Mutex;
use gio::Cancellable;
use glib::{Cast, Closure, ObjectExt, ToVariant};
use regex::Regex;
use relm::{Relm, Update, UpdateNew};
use webkit2gtk_webextension::{
traits::{
DOMDocumentExt,
DOMDOMSelectionExt,
DOMDOMWindowExt,
DOMElementExt,
DOMEventTargetExt,
DOMHTMLElementExt,
DOMHTMLInputElementExt,
DOMNodeExt,
WebPageExt,
},
DOMElement,
DOMHTMLElement,
DOMHTMLInputElement,
DOMHTMLSelectElement,
DOMHTMLTextAreaElement,
UserMessage,
WebPage,
};
use titanium_common::{FollowMode, InnerMessage, protocol::encode};
use titanium_common::Action::{
self,
CopyLink,
DownloadLink,
FileInput,
GoInInsertMode,
NoAction,
};
use titanium_common::InnerMessage::*;
use dom::{
get_body,
get_elements_by_tag_name_in_all_frames,
get_hints_container,
get_href,
get_position,
is_enabled,
is_hidden,
is_text_input,
mouse_down,
click,
mouse_out,
mouse_over,
match_pattern,
};
use hints::{create_hints, hide_unrelevant_hints, show_all_hints, HINTS_ID};
use login_form::{get_credentials, load_password, load_username, submit_login_form};
use self::Msg::*;
pub struct Executor {
model: Model,
}
pub struct Model {
activated_file_input: Option<DOMHTMLInputElement>,
hint_keys: String,
hint_map: HashMap<String, DOMElement>,
last_hovered_element: Option<DOMElement>,
marks: HashMap<u8, u32>, // Byte to percent.
page: WebPage,
relm: Relm<Executor>,
scroll_element: Option<DOMElement>,
}
#[derive(Msg)]
pub enum Msg {
DocumentLoaded,
MessageRecv(InnerMessage),
Scroll,
}
impl Update for Executor {
type Model = Model;
type ModelParam = WebPage;
type Msg = Msg;
fn model(relm: &Relm<Self>, page: WebPage) -> Model {
Model {
activated_file_input: None,
hint_keys: String::new(),
hint_map: HashMap::new(),
last_hovered_element: None,
marks: HashMap::new(),
page,
relm: relm.clone(),
scroll_element: None,
}
}
fn update(&mut self, message: Msg) {
match message {
DocumentLoaded => {
self.init_scroll_element();
self.send_scroll_percentage();
let stream = self.model.relm.stream().clone();
let stream = Mutex::new(::send_cell::SendCell::new(stream));
let handler = Closure::new(move |_| {
let stream = stream.lock().unwrap();
stream.get().emit(Scroll);
None
});
if self.model.scroll_element == get_body(&self.model.page).map(|el| el.upcast()) {
let document = wtry_opt_no_ret!(self.model.page.dom_document());
document.add_event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
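// Summary of the dispatch above (descriptive comment, added for clarity):
//
//     element kind                                resulting action
//     ------------------------------------------  ----------------
//     button/checkbox/image/radio/reset/submit    synthetic click, NoAction
//     input[type=color]                           NoAction (known not to open)
//     input[type=file]                            remembered for SelectFile, FileInput
//     other inputs, textarea, select[multiple]    focus, GoInInsertMode
//     select (single choice)                      mouse_down, NoAction
//     anything else                               synthetic click, NoAction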
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the irrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve fewer messages if we removed the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
| // Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
// FIXME: this is not working.
input_file.set_value(file);
}
}
fn send(&self, message: InnerMessage) {
let bytes =
match encode(message) {
Ok(message) => message,
Err(error) => {
error!("{}", error);
return;
},
};
let message = UserMessage::new("", Some(&bytes.to_variant()));
self.model.page.send_message_to_view(&message, None::<&Cancellable>, |_| {});
}
// Get the username and password from the login form.
fn send_credentials(&mut self) {
let mut username = String::new();
let mut password = String::new();
let credential =
self.model.page.dom_document()
.and_then(|document| get_credentials(&document));
if let Some(credential) = credential {
username = credential.username;
password = credential.password;
}
// TODO: Send None instead of empty strings.
self.send(Credentials(username, password));
}
// Get the page scroll percentage.
fn send_scroll_percentage(&mut self) {
let percentage = self.scroll_percentage();
self.send(ScrollPercentage(percentage));
}
// Show hints for the elements using the hint characters.
// TODO: only send the hint characters once, not every time?
fn show_hints(&mut self, hint_chars: &str) {
self.model.hint_keys.clear();
let container = wtry_opt_no_ret!(get_hints_container(&self.model.page));
let document = wtry_opt_no_ret!(self.model.page.dom_document());
let (hints, hint_map) = wtry_opt_no_ret!(create_hints(&document, hint_chars));
self.model.hint_map = hint_map;
check_err!(container.append_child(&hints));
}
// Submit the login form.
fn submit_login_form(&self) {
let document = get_document!(self);
submit_login_form(&document);
}
}
| let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
| identifier_body |
mod.rs | /*
* Copyright (c) 2017-2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
macro_rules! get_document {
($_self:ident) => {{
let document = $_self.model.page.dom_document();
if let Some(document) = document {
document
}
else {
return;
}
}};
}
mod marks;
mod scroll;
use std::collections::HashMap;
use std::f32;
use std::sync::Mutex;
use gio::Cancellable;
use glib::{Cast, Closure, ObjectExt, ToVariant};
use regex::Regex;
use relm::{Relm, Update, UpdateNew};
use webkit2gtk_webextension::{
traits::{
DOMDocumentExt,
DOMDOMSelectionExt,
DOMDOMWindowExt,
DOMElementExt,
DOMEventTargetExt,
DOMHTMLElementExt,
DOMHTMLInputElementExt,
DOMNodeExt,
WebPageExt,
},
DOMElement,
DOMHTMLElement,
DOMHTMLInputElement,
DOMHTMLSelectElement,
DOMHTMLTextAreaElement,
UserMessage,
WebPage,
};
use titanium_common::{FollowMode, InnerMessage, protocol::encode};
use titanium_common::Action::{
self,
CopyLink,
DownloadLink,
FileInput,
GoInInsertMode,
NoAction,
};
use titanium_common::InnerMessage::*;
use dom::{
get_body,
get_elements_by_tag_name_in_all_frames,
get_hints_container,
get_href,
get_position,
is_enabled,
is_hidden,
is_text_input,
mouse_down,
click,
mouse_out,
mouse_over,
match_pattern,
};
use hints::{create_hints, hide_unrelevant_hints, show_all_hints, HINTS_ID};
use login_form::{get_credentials, load_password, load_username, submit_login_form};
use self::Msg::*;
pub struct Executor {
model: Model,
}
pub struct Model {
activated_file_input: Option<DOMHTMLInputElement>,
hint_keys: String,
hint_map: HashMap<String, DOMElement>,
last_hovered_element: Option<DOMElement>,
marks: HashMap<u8, u32>, // Byte to percent.
page: WebPage,
relm: Relm<Executor>,
scroll_element: Option<DOMElement>,
}
#[derive(Msg)]
pub enum Msg {
DocumentLoaded,
MessageRecv(InnerMessage),
Scroll,
}
impl Update for Executor {
type Model = Model;
type ModelParam = WebPage;
type Msg = Msg;
fn model(relm: &Relm<Self>, page: WebPage) -> Model {
Model {
activated_file_input: None,
hint_keys: String::new(),
hint_map: HashMap::new(),
last_hovered_element: None,
marks: HashMap::new(),
page,
relm: relm.clone(),
scroll_element: None,
}
}
fn update(&mut self, message: Msg) {
match message {
DocumentLoaded => {
self.init_scroll_element();
self.send_scroll_percentage();
let stream = self.model.relm.stream().clone();
let stream = Mutex::new(::send_cell::SendCell::new(stream));
let handler = Closure::new(move |_| {
let stream = stream.lock().unwrap();
stream.get().emit(Scroll);
None
});
if self.model.scroll_element == get_body(&self.model.page).map(|el| el.upcast()) {
let document = wtry_opt_no_ret!(self.model.page.dom_document());
document.add_event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
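// Message-flow sketch (assumptions marked; the UI-side call is
// hypothetical): the UI process is assumed to serialise an
// `InnerMessage` and hand it to this web process, where `update`
// dispatches it to the matching `Executor` method:
//
//     // UI process:            send(InnerMessage::ShowHints("fdsajkl".into()))
//     // web process (here):    MessageRecv(ShowHints(chars)) => self.show_hints(&chars)
//     // reply back to the UI:  self.send(ScrollPercentage(percentage))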
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
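// A hedged sketch of the macro the TODO above hints at (hypothetical,
// not part of this crate); like `get_document!`, it would early-return
// from a `()` method whenever a step in the chain yields `None`:
//
//     macro_rules! opt_try {
//         ($e:expr) => {
//             match $e { Some(value) => value, None => return }
//         };
//     }
//
//     // let document = opt_try!(self.model.page.dom_document());
//     // let window = opt_try!(document.default_view());
//     // let selection = opt_try!(window.selection());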
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the irrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve fewer messages if we removed the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
// Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
// FIXME: this is not working.
input_file.set_value(file);
}
}
fn send(&self, message: InnerMessage) {
let bytes =
match encode(message) {
Ok(message) => message,
Err(error) => {
error!("{}", error);
return;
},
};
let message = UserMessage::new("", Some(&bytes.to_variant()));
self.model.page.send_message_to_view(&message, None::<&Cancellable>, |_| {});
}
// Get the username and password from the login form.
fn send_credentials(&mut self) {
let mut username = String::new();
let mut password = String::new();
let credential =
self.model.page.dom_document()
.and_then(|document| get_credentials(&document));
if let Some(credential) = credential {
username = credential.username;
password = credential.password;
}
// TODO: Send None instead of empty strings.
self.send(Credentials(username, password));
}
// Get the page scroll percentage.
fn send_scroll_percentage(&mut self) {
let percentage = self.scroll_percentage();
self.send(ScrollPercentage(percentage));
}
// Show hints for the elements using the hint characters.
// TODO: only send the hint characters once, not every time?
fn show_hints(&mut self, hint_chars: &str) {
self.model.hint_keys.clear();
let container = wtry_opt_no_ret!(get_hints_container(&self.model.page));
let document = wtry_opt_no_ret!(self.model.page.dom_document());
let (hints, hint_map) = wtry_opt_no_ret!(create_hints(&document, hint_chars));
self.model.hint_map = hint_map;
check_err!(container.append_child(&hints));
}
// Submit the login form.
fn submit_login_form(&self) {
let document = get_document!(self);
submit_login_form(&document);
}
} | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | random_line_split |
mod.rs | /*
* Copyright (c) 2017-2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
macro_rules! get_document {
($_self:ident) => {{
let document = $_self.model.page.dom_document();
if let Some(document) = document {
document
}
else {
return;
}
}};
}
mod marks;
mod scroll;
use std::collections::HashMap;
use std::f32;
use std::sync::Mutex;
use gio::Cancellable;
use glib::{Cast, Closure, ObjectExt, ToVariant};
use regex::Regex;
use relm::{Relm, Update, UpdateNew};
use webkit2gtk_webextension::{
traits::{
DOMDocumentExt,
DOMDOMSelectionExt,
DOMDOMWindowExt,
DOMElementExt,
DOMEventTargetExt,
DOMHTMLElementExt,
DOMHTMLInputElementExt,
DOMNodeExt,
WebPageExt,
},
DOMElement,
DOMHTMLElement,
DOMHTMLInputElement,
DOMHTMLSelectElement,
DOMHTMLTextAreaElement,
UserMessage,
WebPage,
};
use titanium_common::{FollowMode, InnerMessage, protocol::encode};
use titanium_common::Action::{
self,
CopyLink,
DownloadLink,
FileInput,
GoInInsertMode,
NoAction,
};
use titanium_common::InnerMessage::*;
use dom::{
get_body,
get_elements_by_tag_name_in_all_frames,
get_hints_container,
get_href,
get_position,
is_enabled,
is_hidden,
is_text_input,
mouse_down,
click,
mouse_out,
mouse_over,
match_pattern,
};
use hints::{create_hints, hide_unrelevant_hints, show_all_hints, HINTS_ID};
use login_form::{get_credentials, load_password, load_username, submit_login_form};
use self::Msg::*;
pub struct Executor {
model: Model,
}
pub struct Model {
activated_file_input: Option<DOMHTMLInputElement>,
hint_keys: String,
hint_map: HashMap<String, DOMElement>,
last_hovered_element: Option<DOMElement>,
marks: HashMap<u8, u32>, // Byte to percent.
page: WebPage,
relm: Relm<Executor>,
scroll_element: Option<DOMElement>,
}
#[derive(Msg)]
pub enum Msg {
DocumentLoaded,
MessageRecv(InnerMessage),
Scroll,
}
impl Update for Executor {
type Model = Model;
type ModelParam = WebPage;
type Msg = Msg;
fn model(relm: &Relm<Self>, page: WebPage) -> Model {
Model {
activated_file_input: None,
hint_keys: String::new(),
hint_map: HashMap::new(),
last_hovered_element: None,
marks: HashMap::new(),
page,
relm: relm.clone(),
scroll_element: None,
}
}
fn update(&mut self, message: Msg) {
match message {
DocumentLoaded => {
self.init_scroll_element();
self.send_scroll_percentage();
let stream = self.model.relm.stream().clone();
let stream = Mutex::new(::send_cell::SendCell::new(stream));
let handler = Closure::new(move |_| {
let stream = stream.lock().unwrap();
stream.get().emit(Scroll);
None
});
if self.model.scroll_element == get_body(&self.model.page).map(|el| el.upcast()) {
let document = wtry_opt_no_ret!(self.model.page.dom_document());
document.add_event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
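// A hedged sketch of the URL heuristic the TODO above describes
// (hypothetical helper, not in the original source): treat `candidate`
// as the next page when the two URLs differ only in numeric components
// that increase by one, e.g. example.com/page/4 => example.com/page/5.
//
//     fn is_next_page_url(current: &str, candidate: &str) -> bool {
//         let a: Vec<&str> = current.split('/').collect();
//         let b: Vec<&str> = candidate.split('/').collect();
//         a.len() == b.len() && a != b && a.iter().zip(&b)
//             .filter(|(x, y)| x != y)
//             .all(|(x, y)| match (x.parse::<u64>(), y.parse::<u64>()) {
//                 (Ok(n), Ok(m)) => m == n + 1,
//                 _ => false,
//             })
//     }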
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the irrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve fewer messages if we removed the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
// Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
|
fn send(&self, message: InnerMessage) {
let bytes =
match encode(message) {
Ok(message) => message,
Err(error) => {
error!("{}", error);
return;
},
};
let message = UserMessage::new("", Some(&bytes.to_variant()));
self.model.page.send_message_to_view(&message, None::<&Cancellable>, |_| {});
}
// Get the username and password from the login form.
fn send_credentials(&mut self) {
let mut username = String::new();
let mut password = String::new();
let credential =
self.model.page.dom_document()
.and_then(|document| get_credentials(&document));
if let Some(credential) = credential {
username = credential.username;
password = credential.password;
}
// TODO: Send None instead of empty strings.
self.send(Credentials(username, password));
}
// Get the page scroll percentage.
fn send_scroll_percentage(&mut self) {
let percentage = self.scroll_percentage();
self.send(ScrollPercentage(percentage));
}
// Show hints for the elements using the hint characters.
// TODO: only send the hint characters once, not every time?
fn show_hints(&mut self, hint_chars: &str) {
self.model.hint_keys.clear();
let container = wtry_opt_no_ret!(get_hints_container(&self.model.page));
let document = wtry_opt_no_ret!(self.model.page.dom_document());
let (hints, hint_map) = wtry_opt_no_ret!(create_hints(&document, hint_chars));
self.model.hint_map = hint_map;
check_err!(container.append_child(&hints));
}
// Submit the login form.
fn submit_login_form(&self) {
let document = get_document!(self);
submit_login_form(&document);
}
}
| // FIXME: this is not working.
input_file.set_value(file);
}
} | conditional_block |
bidirectional.rs | use rand::{self, Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use cgmath::{EuclideanSpace, InnerSpace, Point2, Vector3};
use collision::Ray3;
use super::{
algorithm::{contribute, make_tiles, Tile},
LocalProgress, Progress, Renderer, TaskRunner,
};
use crate::cameras::Camera;
use crate::film::{Film, Sample};
use crate::lamp::{RaySample, Surface};
use crate::tracer::{trace, Bounce, BounceType};
use crate::utils::pairs;
use crate::{
materials::ProbabilityInput,
math::DIST_EPSILON,
program::{ExecutionContext, Resources},
world::World,
};
use std::{
cell::Cell,
time::{Duration, Instant},
};
pub struct BidirParams {
pub bounces: u32,
}
pub(crate) fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) | index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
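// Descriptive note (added for clarity): the subpath above was traced
// *from* the lamp, so `pairs` flips each stored incident direction to
// point along the transport direction used when connecting. If the
// subpath happened to terminate on another emissive surface, that
// trailing vertex is dropped, and the final `reverse` orders the path
// from the scene back towards the light, which is the order
// `connect_paths` walks it in.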
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
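// Weighting note (added for clarity): every camera vertex may be paired
// with every lamp vertex, so one pixel sample yields up to
// `camera_path.len() * lamp_path.len()` complete paths. Each is averaged
// with the uniform weight 1/total instead of a full multiple importance
// sampling weight; e.g. 4 camera bounces and 3 lamp vertices give 12
// candidate connections, each weighted 1/12.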
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot(ray.direction) <= 0.0 {
continue;
}
if lamp_bounce.normal.dot(-ray.direction) <= 0.0 {
continue;
}
let hit = world.intersect(ray).map(|hit| hit.distance);
if let Some(dist) = hit {
if dist < distance - DIST_EPSILON {
continue;
}
}
let cos_out = bounce.normal.dot(ray.direction).abs();
let cos_in = lamp_bounce.normal.dot(-ray.direction).abs();
let brdf_out = bounce_brdf(bounce.incident, bounce.normal, ray.direction)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
let scale = cos_in * cos_out * brdf_out / (2.0 * std::f32::consts::PI * sq_distance);
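// Formula note (added for clarity): this folds the standard geometry
// term of a path connection, G = cos_in * cos_out / r^2, together with
// the outgoing BRDF ratio and what is assumed to be a 1/(2*pi)
// hemisphere pdf normalisation:
//
//     scale = (cos_in * cos_out * brdf_out) / (2 * pi * r^2)
//
// so doubling the connection distance cuts the transported energy by a
// factor of four.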
let brdf_in = lamp_bounce.ty.brdf(-ray.direction, lamp_bounce.normal)
/ lamp_bounce
.ty
.brdf(lamp_bounce.incident, lamp_bounce.normal);
let mut use_additional = use_additional;
let mut additional: Vec<_> = additional
.iter()
.cloned()
.map(|(s, r)| (s, r * scale))
.collect();
let mut main = main.clone();
main.1 *= scale;
for (i, bounce) in path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional
} else {
&mut []
};
contribute(bounce, &mut main, additional_samples, exe);
if i == 0 {
main.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
contributions.push(main.0);
if use_additional {
contributions.extend(additional.into_iter().map(|(s, _)| s));
}
}
contributions
}
| {
fn gen_rng() -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile( | identifier_body |
bidirectional.rs | use rand::{self, Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use cgmath::{EuclideanSpace, InnerSpace, Point2, Vector3};
use collision::Ray3;
use super::{
algorithm::{contribute, make_tiles, Tile},
LocalProgress, Progress, Renderer, TaskRunner,
};
use crate::cameras::Camera;
use crate::film::{Film, Sample};
use crate::lamp::{RaySample, Surface};
use crate::tracer::{trace, Bounce, BounceType};
use crate::utils::pairs;
use crate::{
materials::ProbabilityInput,
math::DIST_EPSILON,
program::{ExecutionContext, Resources},
world::World,
};
use std::{
cell::Cell,
time::{Duration, Instant},
};
pub struct BidirParams {
pub bounces: u32,
}
pub(crate) fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) {
fn gen_rng() -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile(
index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
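// Descriptive note (added for clarity): this is the light-tracing half
// of the estimator. A diffuse lamp-path vertex visible from the camera
// is splatted onto the film at `position`, attenuated by the inverse
// square of its distance to the camera ray origin; any remaining lens
// and cosine factors are assumed to be folded into `camera.is_visible`.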
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot(ray.direction) <= 0.0 {
continue;
}
if lamp_bounce.normal.dot(-ray.direction) <= 0.0 {
continue;
}
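// Shadow test: skip this connection if any geometry sits between the two vertices.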
let hit = world.intersect(ray).map(|hit| hit.distance);
if let Some(dist) = hit {
if dist < distance - DIST_EPSILON {
continue;
}
}
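// Geometry term: cosine falloff at both endpoints over the squared distance.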
let cos_out = bounce.normal.dot(ray.direction).abs();
let cos_in = lamp_bounce.normal.dot(-ray.direction).abs();
let brdf_out = bounce_brdf(bounce.incident, bounce.normal, ray.direction)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
let scale = cos_in * cos_out * brdf_out / (2.0 * std::f32::consts::PI * sq_distance);
let brdf_in = lamp_bounce.ty.brdf(-ray.direction, lamp_bounce.normal)
/ lamp_bounce
.ty
.brdf(lamp_bounce.incident, lamp_bounce.normal);
let mut use_additional = use_additional;
let mut additional: Vec<_> = additional
.iter()
.cloned()
.map(|(s, r)| (s, r * scale))
.collect();
let mut main = main.clone();
main.1 *= scale;
for (i, bounce) in path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional | else {
&mut []
};
contribute(bounce, &mut main, additional_samples, exe);
if i == 0 {
main.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
contributions.push(main.0);
if use_additional {
contributions.extend(additional.into_iter().map(|(s, _)| s));
}
}
contributions
}
| {
&mut *additional
} | conditional_block |
bidirectional.rs | use rand::{self, Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use cgmath::{EuclideanSpace, InnerSpace, Point2, Vector3};
use collision::Ray3;
use super::{
algorithm::{contribute, make_tiles, Tile},
LocalProgress, Progress, Renderer, TaskRunner,
};
use crate::cameras::Camera;
use crate::film::{Film, Sample};
use crate::lamp::{RaySample, Surface};
use crate::tracer::{trace, Bounce, BounceType};
use crate::utils::pairs;
use crate::{
materials::ProbabilityInput,
math::DIST_EPSILON,
program::{ExecutionContext, Resources},
world::World,
};
use std::{
cell::Cell,
time::{Duration, Instant},
};
pub struct BidirParams {
pub bounces: u32,
}
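/// Bidirectional path tracing entry point: splits the film into tiles,
/// renders them on the task runner, and reports progress after each tile.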
pub(crate) fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) {
fn | () -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile(
index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
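// Pick a lamp (together with its selection probability) and sample a ray
// leaving it; both probabilities feed into the first bounce's weight below.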
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
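// Nudge the origin off the emitting surface to avoid self-intersection.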
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
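// A lamp path that ends on an emissive vertex cannot be connected to the
// camera path, so trim it; diffuse and specular endpoints are kept.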
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if !matches!(bounce.ty, BounceType::Diffuse(_, _)) {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot(ray.direction) <= 0.0 {
continue;
}
if lamp_bounce.normal.dot(-ray.direction) <= 0.0 {
continue;
}
let hit = world.intersect(ray).map(|hit| hit.distance);
if let Some(dist) = hit {
if dist < distance - DIST_EPSILON {
continue;
}
}
let cos_out = bounce.normal.dot(ray.direction).abs();
let cos_in = lamp_bounce.normal.dot(-ray.direction).abs();
let brdf_out = bounce_brdf(bounce.incident, bounce.normal, ray.direction)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
let scale = cos_in * cos_out * brdf_out / (2.0 * std::f32::consts::PI * sq_distance);
let brdf_in = lamp_bounce.ty.brdf(-ray.direction, lamp_bounce.normal)
/ lamp_bounce
.ty
.brdf(lamp_bounce.incident, lamp_bounce.normal);
let mut use_additional = use_additional;
let mut additional: Vec<_> = additional
.iter()
.cloned()
.map(|(s, r)| (s, r * scale))
.collect();
let mut main = main.clone();
main.1 *= scale;
for (i, bounce) in path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional
} else {
&mut []
};
contribute(bounce, &mut main, additional_samples, exe);
if i == 0 {
main.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
contributions.push(main.0);
if use_additional {
contributions.extend(additional.into_iter().map(|(s, _)| s));
}
}
contributions
}
| gen_rng | identifier_name |
bidirectional.rs | use cgmath::{EuclideanSpace, InnerSpace, Point2, Vector3};
use collision::Ray3;
use super::{
algorithm::{contribute, make_tiles, Tile},
LocalProgress, Progress, Renderer, TaskRunner,
};
use crate::cameras::Camera;
use crate::film::{Film, Sample};
use crate::lamp::{RaySample, Surface};
use crate::tracer::{trace, Bounce, BounceType};
use crate::utils::pairs;
use crate::{
materials::ProbabilityInput,
math::DIST_EPSILON,
program::{ExecutionContext, Resources},
world::World,
};
use std::{
cell::Cell,
time::{Duration, Instant},
};
pub struct BidirParams {
pub bounces: u32,
}
pub(crate) fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) {
fn gen_rng() -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile(
index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
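// All spectral samples share one geometric path until a dispersive bounce
// splits them; after that only the main wavelength remains valid.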
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if !matches!(bounce.ty, BounceType::Diffuse(_, _)) {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot(ray.direction) <= 0.0 {
continue;
}
if lamp_bounce.normal.dot(-ray.direction) <= 0.0 {
continue;
}
let hit = world.intersect(ray).map(|hit| hit.distance);
if let Some(dist) = hit {
if dist < distance - DIST_EPSILON {
continue;
}
}
let cos_out = bounce.normal.dot(ray.direction).abs();
let cos_in = lamp_bounce.normal.dot(-ray.direction).abs();
let brdf_out = bounce_brdf(bounce.incident, bounce.normal, ray.direction)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
let scale = cos_in * cos_out * brdf_out / (2.0 * std::f32::consts::PI * sq_distance);
let brdf_in = lamp_bounce.ty.brdf(-ray.direction, lamp_bounce.normal)
/ lamp_bounce
.ty
.brdf(lamp_bounce.incident, lamp_bounce.normal);
let mut use_additional = use_additional;
let mut additional: Vec<_> = additional
.iter()
.cloned()
.map(|(s, r)| (s, r * scale))
.collect();
let mut main = main.clone();
main.1 *= scale;
for (i, bounce) in path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional
} else {
&mut []
};
contribute(bounce, &mut main, additional_samples, exe);
if i == 0 {
main.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
contributions.push(main.0);
if use_additional {
contributions.extend(additional.into_iter().map(|(s, _)| s));
}
}
contributions
} | use rand::{self, Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
| random_line_split |
|
ed25519.rs | // -*- mode: rust; -*-
//
// This file is part of ed25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
//! ed25519 keypairs and batch verification.
use core::default::Default;
use rand::CryptoRng;
use rand::Rng;
#[cfg(feature = "serde")]
use serde::de::Error as SerdeError;
#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
pub use sha2::Sha512;
use curve25519_dalek::digest::generic_array::typenum::U64;
pub use curve25519_dalek::digest::Digest;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::EdwardsPoint;
use curve25519_dalek::scalar::Scalar;
pub use crate::constants::*;
pub use crate::errors::*;
pub use crate::public::*;
pub use crate::secret::*;
pub use crate::signature::*;
/// Verify a batch of `signatures` on `messages` with their respective `public_keys`.
///
/// # Inputs
///
/// * `messages` is a slice of byte slices, one per signed message.
/// * `signatures` is a slice of `Signature`s.
/// * `public_keys` is a slice of `PublicKey`s.
/// * `csprng` is an implementation of `Rng + CryptoRng`, such as
/// `rand::rngs::ThreadRng`.
///
/// # Panics
///
/// This function will panic if the `messages`, `signatures`, and `public_keys`
/// slices are not equal length.
///
/// # Returns
///
/// * A `Result` whose `Ok` value is an empty tuple and whose `Err` value is a
/// `SignatureError` containing a description of the internal error which
/// occurred.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::verify_batch;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
/// use rand::rngs::ThreadRng;
///
/// # fn main() {
/// let mut csprng: ThreadRng = thread_rng();
/// let keypairs: Vec<Keypair> = (0..64).map(|_| Keypair::generate(&mut csprng)).collect();
/// let msg: &[u8] = b"They're good dogs Brant";
/// let messages: Vec<&[u8]> = (0..64).map(|_| msg).collect();
/// let signatures: Vec<Signature> = keypairs.iter().map(|key| key.sign(&msg)).collect();
/// let public_keys: Vec<PublicKey> = keypairs.iter().map(|key| key.public).collect();
///
/// let result = verify_batch(&messages[..], &signatures[..], &public_keys[..]);
/// assert!(result.is_ok());
/// # }
/// ```
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
pub fn verify_batch(
messages: &[&[u8]],
signatures: &[Signature],
public_keys: &[PublicKey],
) -> Result<(), SignatureError>
{
const ASSERT_MESSAGE: &'static str = "The number of messages, signatures, and public keys must be equal.";
assert!(signatures.len() == messages.len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once;
use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
// Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
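// The whole batch verifies iff the random linear combination collapses to
// the identity point.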
if id.is_identity() {
Ok(())
} else {
Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose `Ok` value is an EdDSA `Keypair` or whose error value
/// is a `SignatureError` describing the error that occurred.
pub fn from_bytes<'a>(bytes: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// If you want, you can optionally pass a "context". It is generally a
/// good idea to choose a context and try to make it unique to your project
/// and this specific usage of signatures.
///
/// For example, without this, if you were to [convert your OpenPGP key
/// to a Bitcoin key][terrible_idea] (just as an example, and also Don't
/// Ever Do That) and someone tricked you into signing an "email" which was
/// actually a Bitcoin transaction moving all your magic internet money to
/// their address, it'd be a valid transaction.
///
/// By adding a context, this trick becomes impossible, because the context
/// is concatenated into the hash, which is then signed. So, going with the
/// previous example, if your bitcoin wallet used a context of
/// "BitcoinWalletAppTxnSigning" and OpenPGP used a context (this is likely
/// the least of their safety problems) of "GPGsCryptoIsntConstantTimeLol",
/// then the signatures produced by both could never match the other, even
/// if they signed the exact same message with the same key.
///
/// Let's add a context for good measure (remember, you'll want to choose
/// your own!):
///
/// ```
/// # extern crate ed25519_dalek;
/// # extern crate rand;
/// #
/// # use ed25519_dalek::Digest;
/// # use ed25519_dalek::Keypair;
/// # use ed25519_dalek::Signature;
/// # use ed25519_dalek::Sha512;
/// # use rand::thread_rng;
/// #
/// # #[cfg(feature = "std")]
/// # fn main() {
/// # let mut csprng = thread_rng();
/// # let keypair: Keypair = Keypair::generate(&mut csprng);
/// # let message: &[u8] = b"All I want is to pet all of the dogs.";
/// # let mut prehashed: Sha512 = Sha512::new();
/// # prehashed.input(message);
/// #
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
/// [terrible_idea]: https://github.com/isislovecruft/scripts/blob/master/gpgkey2bc.py
pub fn sign_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&'static [u8]>,
) -> Signature
where
D: Digest<OutputSize = U64>,
{
let expanded: ExpandedSecretKey = (&self.secret).into(); // xxx thanks i hate this
expanded.sign_prehashed(prehashed_message, &self.public, context)
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify(
&self,
message: &[u8],
signature: &Signature
) -> Result<(), SignatureError>
{
self.public.verify(message, signature)
}
/// Verify a `signature` on a `prehashed_message` using the Ed25519ph algorithm.
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
/// * `signature` is a purported Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Returns
///
/// Returns `true` if the `signature` was a valid signature created by this
/// `Keypair` on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
/// use ed25519_dalek::Sha512;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// let mut prehashed: Sha512 = Sha512::default();
/// prehashed.input(message);
///
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
///
/// // The sha2::Sha512 struct doesn't implement Copy, so we'll have to create a new one:
/// let mut prehashed_again: Sha512 = Sha512::default();
/// prehashed_again.input(message);
///
/// let verified = keypair.public.verify_prehashed(prehashed_again, Some(context), &sig);
///
/// assert!(verified.is_ok());
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
pub fn verify_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&[u8]>,
signature: &Signature,
) -> Result<(), SignatureError>
where
D: Digest<OutputSize = U64>,
{
self.public.verify_prehashed(prehashed_message, context, signature)
}
}
#[cfg(feature = "serde")]
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self.to_bytes()[..])
}
}
#[cfg(feature = "serde")]
impl<'d> Deserialize<'d> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'d>,
{
struct KeypairVisitor;
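// Expects the same 64-byte secret-then-public layout produced by `to_bytes`.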
impl<'d> Visitor<'d> for KeypairVisitor {
type Value = Keypair;
fn expecting(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
| fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Keypair, E>
where
E: SerdeError,
{
let secret_key = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH]);
let public_key = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..]);
if secret_key.is_ok() && public_key.is_ok() {
Ok(Keypair{ secret: secret_key.unwrap(), public: public_key.unwrap() })
} else {
Err(SerdeError::invalid_length(bytes.len(), &self))
}
}
}
deserializer.deserialize_bytes(KeypairVisitor)
}
}
#[cfg(test)]
mod test {
use super::*;
use clear_on_drop::clear::Clear;
#[test]
fn keypair_clear_on_drop() {
let mut keypair: Keypair = Keypair::from_bytes(&[1u8; KEYPAIR_LENGTH][..]).unwrap();
keypair.clear();
fn as_bytes<T>(x: &T) -> &[u8] {
use std::mem;
use std::slice;
unsafe { slice::from_raw_parts(x as *const T as *const u8, mem::size_of_val(x)) }
}
assert!(!as_bytes(&keypair).contains(&0x15));
}
}
| formatter.write_str("An ed25519 keypair, 64 bytes in total where the secret key is \
the first 32 bytes and is in unexpanded form, and the second \
32 bytes is a compressed point for a public key.")
}
| identifier_body |
ed25519.rs | // -*- mode: rust; -*-
//
// This file is part of ed25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
//! ed25519 keypairs and batch verification.
use core::default::Default;
use rand::CryptoRng;
use rand::Rng;
#[cfg(feature = "serde")]
use serde::de::Error as SerdeError;
#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
pub use sha2::Sha512;
use curve25519_dalek::digest::generic_array::typenum::U64;
pub use curve25519_dalek::digest::Digest;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::EdwardsPoint;
use curve25519_dalek::scalar::Scalar;
pub use crate::constants::*;
pub use crate::errors::*;
pub use crate::public::*;
pub use crate::secret::*;
pub use crate::signature::*;
/// Verify a batch of `signatures` on `messages` with their respective `public_keys`.
///
/// # Inputs
///
/// * `messages` is a slice of byte slices, one per signed message.
/// * `signatures` is a slice of `Signature`s.
/// * `public_keys` is a slice of `PublicKey`s.
/// * `csprng` is an implementation of `Rng + CryptoRng`, such as
/// `rand::rngs::ThreadRng`.
///
/// # Panics
///
/// This function will panic if the `messages`, `signatures`, and `public_keys`
/// slices are not equal length.
///
/// # Returns
///
/// * A `Result` whose `Ok` value is an empty tuple and whose `Err` value is a
/// `SignatureError` containing a description of the internal error which
/// occurred.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::verify_batch;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
/// use rand::rngs::ThreadRng;
///
/// # fn main() {
/// let mut csprng: ThreadRng = thread_rng();
/// let keypairs: Vec<Keypair> = (0..64).map(|_| Keypair::generate(&mut csprng)).collect();
/// let msg: &[u8] = b"They're good dogs Brant";
/// let messages: Vec<&[u8]> = (0..64).map(|_| msg).collect();
/// let signatures: Vec<Signature> = keypairs.iter().map(|key| key.sign(&msg)).collect();
/// let public_keys: Vec<PublicKey> = keypairs.iter().map(|key| key.public).collect();
///
/// let result = verify_batch(&messages[..], &signatures[..], &public_keys[..]);
/// assert!(result.is_ok());
/// # }
/// ```
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
pub fn verify_batch(
messages: &[&[u8]],
signatures: &[Signature],
public_keys: &[PublicKey],
) -> Result<(), SignatureError>
{
const ASSERT_MESSAGE: &'static str = "The number of messages, signatures, and public keys must be equal.";
assert!(signatures.len() == messages.len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once; | // Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
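// Random 128-bit coefficients make an invalid batch pass verification only
// with negligible probability.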
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
Ok(())
} else {
Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose `Ok` value is an EdDSA `Keypair` or whose error value
/// is a `SignatureError` describing the error that occurred.
pub fn from_bytes<'a>(bytes: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// If you want, you can optionally pass a "context". It is generally a
/// good idea to choose a context and try to make it unique to your project
/// and this specific usage of signatures.
///
/// For example, without this, if you were to [convert your OpenPGP key
/// to a Bitcoin key][terrible_idea] (just as an example, and also Don't
/// Ever Do That) and someone tricked you into signing an "email" which was
/// actually a Bitcoin transaction moving all your magic internet money to
/// their address, it'd be a valid transaction.
///
/// By adding a context, this trick becomes impossible, because the context
/// is concatenated into the hash, which is then signed. So, going with the
/// previous example, if your bitcoin wallet used a context of
/// "BitcoinWalletAppTxnSigning" and OpenPGP used a context (this is likely
/// the least of their safety problems) of "GPGsCryptoIsntConstantTimeLol",
/// then the signatures produced by both could never match the other, even
/// if they signed the exact same message with the same key.
///
/// Let's add a context for good measure (remember, you'll want to choose
/// your own!):
///
/// ```
/// # extern crate ed25519_dalek;
/// # extern crate rand;
/// #
/// # use ed25519_dalek::Digest;
/// # use ed25519_dalek::Keypair;
/// # use ed25519_dalek::Signature;
/// # use ed25519_dalek::Sha512;
/// # use rand::thread_rng;
/// #
/// # #[cfg(feature = "std")]
/// # fn main() {
/// # let mut csprng = thread_rng();
/// # let keypair: Keypair = Keypair::generate(&mut csprng);
/// # let message: &[u8] = b"All I want is to pet all of the dogs.";
/// # let mut prehashed: Sha512 = Sha512::new();
/// # prehashed.input(message);
/// #
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
/// [terrible_idea]: https://github.com/isislovecruft/scripts/blob/master/gpgkey2bc.py
pub fn sign_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&'static [u8]>,
) -> Signature
where
D: Digest<OutputSize = U64>,
{
let expanded: ExpandedSecretKey = (&self.secret).into(); // xxx thanks i hate this
expanded.sign_prehashed(prehashed_message, &self.public, context)
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify(
&self,
message: &[u8],
signature: &Signature
) -> Result<(), SignatureError>
{
self.public.verify(message, signature)
}
/// Verify a `signature` on a `prehashed_message` using the Ed25519ph algorithm.
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
/// * `signature` is a purported Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Returns
///
/// Returns `true` if the `signature` was a valid signature created by this
/// `Keypair` on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
/// use ed25519_dalek::Sha512;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// let mut prehashed: Sha512 = Sha512::default();
/// prehashed.input(message);
///
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
///
/// // The sha2::Sha512 struct doesn't implement Copy, so we'll have to create a new one:
/// let mut prehashed_again: Sha512 = Sha512::default();
/// prehashed_again.input(message);
///
/// let verified = keypair.public.verify_prehashed(prehashed_again, Some(context), &sig);
///
/// assert!(verified.is_ok());
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
pub fn verify_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&[u8]>,
signature: &Signature,
) -> Result<(), SignatureError>
where
D: Digest<OutputSize = U64>,
{
self.public.verify_prehashed(prehashed_message, context, signature)
}
}
#[cfg(feature = "serde")]
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self.to_bytes()[..])
}
}
#[cfg(feature = "serde")]
impl<'d> Deserialize<'d> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'d>,
{
struct KeypairVisitor;
impl<'d> Visitor<'d> for KeypairVisitor {
type Value = Keypair;
fn expecting(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
formatter.write_str("An ed25519 keypair, 64 bytes in total where the secret key is \
the first 32 bytes and is in unexpanded form, and the second \
32 bytes is a compressed point for a public key.")
}
fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Keypair, E>
where
E: SerdeError,
{
let secret_key = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH]);
let public_key = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..]);
if secret_key.is_ok() && public_key.is_ok() {
Ok(Keypair{ secret: secret_key.unwrap(), public: public_key.unwrap() })
} else {
Err(SerdeError::invalid_length(bytes.len(), &self))
}
}
}
deserializer.deserialize_bytes(KeypairVisitor)
}
}
#[cfg(test)]
mod test {
use super::*;
use clear_on_drop::clear::Clear;
#[test]
fn keypair_clear_on_drop() {
let mut keypair: Keypair = Keypair::from_bytes(&[1u8; KEYPAIR_LENGTH][..]).unwrap();
keypair.clear();
fn as_bytes<T>(x: &T) -> &[u8] {
use std::mem;
use std::slice;
unsafe { slice::from_raw_parts(x as *const T as *const u8, mem::size_of_val(x)) }
}
assert!(!as_bytes(&keypair).contains(&0x15));
}
} | use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
| random_line_split |
ed25519.rs | // -*- mode: rust; -*-
//
// This file is part of ed25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
//! ed25519 keypairs and batch verification.
use core::default::Default;
use rand::CryptoRng;
use rand::Rng;
#[cfg(feature = "serde")]
use serde::de::Error as SerdeError;
#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
pub use sha2::Sha512;
use curve25519_dalek::digest::generic_array::typenum::U64;
pub use curve25519_dalek::digest::Digest;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::EdwardsPoint;
use curve25519_dalek::scalar::Scalar;
pub use crate::constants::*;
pub use crate::errors::*;
pub use crate::public::*;
pub use crate::secret::*;
pub use crate::signature::*;
/// Verify a batch of `signatures` on `messages` with their respective `public_keys`.
///
/// # Inputs
///
/// * `messages` is a slice of byte slices, one per signed message.
/// * `signatures` is a slice of `Signature`s.
/// * `public_keys` is a slice of `PublicKey`s.
/// * `csprng` is an implementation of `Rng + CryptoRng`, such as
/// `rand::rngs::ThreadRng`.
///
/// # Panics
///
/// This function will panic if the `messages`, `signatures`, and `public_keys`
/// slices are not equal length.
///
/// # Returns
///
/// * A `Result` whose `Ok` value is an empty tuple and whose `Err` value is a
/// `SignatureError` containing a description of the internal error which
/// occurred.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::verify_batch;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
/// use rand::rngs::ThreadRng;
///
/// # fn main() {
/// let mut csprng: ThreadRng = thread_rng();
/// let keypairs: Vec<Keypair> = (0..64).map(|_| Keypair::generate(&mut csprng)).collect();
/// let msg: &[u8] = b"They're good dogs Brant";
/// let messages: Vec<&[u8]> = (0..64).map(|_| msg).collect();
/// let signatures: Vec<Signature> = keypairs.iter().map(|key| key.sign(&msg)).collect();
/// let public_keys: Vec<PublicKey> = keypairs.iter().map(|key| key.public).collect();
///
/// let result = verify_batch(&messages[..], &signatures[..], &public_keys[..]);
/// assert!(result.is_ok());
/// # }
/// ```
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
pub fn verify_batch(
messages: &[&[u8]],
signatures: &[Signature],
public_keys: &[PublicKey],
) -> Result<(), SignatureError>
{
const ASSERT_MESSAGE: &'static str = "The number of messages, signatures, and public keys must be equal.";
assert!(signatures.len() == messages.len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once;
use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
// Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
Ok(())
} else {
Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose `Ok` value is an EdDSA `Keypair` or whose error value
/// is a `SignatureError` describing the error that occurred.
pub fn from_byt | es: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// If you want, you can optionally pass a "context". It is generally a
/// good idea to choose a context and try to make it unique to your project
/// and this specific usage of signatures.
///
/// For example, without this, if you were to [convert your OpenPGP key
/// to a Bitcoin key][terrible_idea] (just as an example, and also Don't
/// Ever Do That) and someone tricked you into signing an "email" which was
/// actually a Bitcoin transaction moving all your magic internet money to
/// their address, it'd be a valid transaction.
///
/// By adding a context, this trick becomes impossible, because the context
/// is concatenated into the hash, which is then signed. So, going with the
/// previous example, if your bitcoin wallet used a context of
/// "BitcoinWalletAppTxnSigning" and OpenPGP used a context (this is likely
/// the least of their safety problems) of "GPGsCryptoIsntConstantTimeLol",
/// then the signatures produced by both could never match the other, even
/// if they signed the exact same message with the same key.
///
/// Let's add a context for good measure (remember, you'll want to choose
/// your own!):
///
/// ```
/// # extern crate ed25519_dalek;
/// # extern crate rand;
/// #
/// # use ed25519_dalek::Digest;
/// # use ed25519_dalek::Keypair;
/// # use ed25519_dalek::Signature;
/// # use ed25519_dalek::Sha512;
/// # use rand::thread_rng;
/// #
/// # #[cfg(feature = "std")]
/// # fn main() {
/// # let mut csprng = thread_rng();
/// # let keypair: Keypair = Keypair::generate(&mut csprng);
/// # let message: &[u8] = b"All I want is to pet all of the dogs.";
/// # let mut prehashed: Sha512 = Sha512::new();
/// # prehashed.input(message);
/// #
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
/// [terrible_idea]: https://github.com/isislovecruft/scripts/blob/master/gpgkey2bc.py
pub fn sign_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&'static [u8]>,
) -> Signature
where
D: Digest<OutputSize = U64>,
{
let expanded: ExpandedSecretKey = (&self.secret).into(); // xxx thanks i hate this
expanded.sign_prehashed(prehashed_message, &self.public, context)
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify(
&self,
message: &[u8],
signature: &Signature
) -> Result<(), SignatureError>
{
self.public.verify(message, signature)
}
/// Verify a `signature` on a `prehashed_message` using the Ed25519ph algorithm.
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
/// * `signature` is a purported Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Returns
///
/// Returns `Ok(())` if the `signature` is a valid signature created by this
/// `Keypair` on the `prehashed_message`, and a `SignatureError` otherwise.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
/// use ed25519_dalek::Sha512;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// let mut prehashed: Sha512 = Sha512::default();
/// prehashed.input(message);
///
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
///
/// // The sha2::Sha512 struct doesn't implement Copy, so we'll have to create a new one:
/// let mut prehashed_again: Sha512 = Sha512::default();
/// prehashed_again.input(message);
///
/// let verified = keypair.public.verify_prehashed(prehashed_again, Some(context), &sig);
///
/// assert!(verified.is_ok());
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
pub fn verify_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&[u8]>,
signature: &Signature,
) -> Result<(), SignatureError>
where
D: Digest<OutputSize = U64>,
{
self.public.verify_prehashed(prehashed_message, context, signature)
}
}
#[cfg(feature = "serde")]
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self.to_bytes()[..])
}
}
#[cfg(feature = "serde")]
impl<'d> Deserialize<'d> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'d>,
{
struct KeypairVisitor;
impl<'d> Visitor<'d> for KeypairVisitor {
type Value = Keypair;
fn expecting(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
formatter.write_str("An ed25519 keypair, 64 bytes in total where the secret key is \
the first 32 bytes and is in unexpanded form, and the second \
32 bytes is a compressed point for a public key.")
}
fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Keypair, E>
where
E: SerdeError,
{
// Guard the slice indexing below: without this check a short input
// would panic instead of returning a deserialization error.
if bytes.len() != KEYPAIR_LENGTH {
return Err(SerdeError::invalid_length(bytes.len(), &self));
}
let secret_key = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH]);
let public_key = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..]);
match (secret_key, public_key) {
(Ok(secret), Ok(public)) => Ok(Keypair { secret, public }),
_ => Err(SerdeError::invalid_length(bytes.len(), &self)),
}
}
}
deserializer.deserialize_bytes(KeypairVisitor)
}
}
#[cfg(test)]
mod test {
use super::*;
use clear_on_drop::clear::Clear;
#[test]
fn keypair_clear_on_drop() {
let mut keypair: Keypair = Keypair::from_bytes(&[1u8; KEYPAIR_LENGTH][..]).unwrap();
keypair.clear();
fn as_bytes<T>(x: &T) -> &[u8] {
use std::mem;
use std::slice;
unsafe { slice::from_raw_parts(x as *const T as *const u8, mem::size_of_val(x)) }
}
assert!(!as_bytes(&keypair).contains(&0x15));
}
}
| es<'a>(byt | identifier_name |
ed25519.rs | // -*- mode: rust; -*-
//
// This file is part of ed25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
//! ed25519 keypairs and batch verification.
use core::default::Default;
use rand::CryptoRng;
use rand::Rng;
#[cfg(feature = "serde")]
use serde::de::Error as SerdeError;
#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
pub use sha2::Sha512;
use curve25519_dalek::digest::generic_array::typenum::U64;
pub use curve25519_dalek::digest::Digest;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::EdwardsPoint;
use curve25519_dalek::scalar::Scalar;
pub use crate::constants::*;
pub use crate::errors::*;
pub use crate::public::*;
pub use crate::secret::*;
pub use crate::signature::*;
/// Verify a batch of `signatures` on `messages` with their respective `public_keys`.
///
/// # Inputs
///
/// * `messages` is a slice of byte slices, one per signed message.
/// * `signatures` is a slice of `Signature`s.
/// * `public_keys` is a slice of `PublicKey`s.
/// * `csprng` is an implementation of `Rng + CryptoRng`, such as
/// `rand::rngs::ThreadRng`.
///
/// # Panics
///
/// This function will panic if the `messages`, `signatures`, and `public_keys`
/// slices are not equal length.
///
/// # Returns
///
/// * A `Result` whose `Ok` value is an empty tuple and whose `Err` value is a
/// `SignatureError` containing a description of the internal error which
/// occurred.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::verify_batch;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
/// use rand::rngs::ThreadRng;
///
/// # fn main() {
/// let mut csprng: ThreadRng = thread_rng();
/// let keypairs: Vec<Keypair> = (0..64).map(|_| Keypair::generate(&mut csprng)).collect();
/// let msg: &[u8] = b"They're good dogs Brant";
/// let messages: Vec<&[u8]> = (0..64).map(|_| msg).collect();
/// let signatures: Vec<Signature> = keypairs.iter().map(|key| key.sign(&msg)).collect();
/// let public_keys: Vec<PublicKey> = keypairs.iter().map(|key| key.public).collect();
///
/// let result = verify_batch(&messages[..], &signatures[..], &public_keys[..]);
/// assert!(result.is_ok());
/// # }
/// ```
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
pub fn verify_batch(
messages: &[&[u8]],
signatures: &[Signature],
public_keys: &[PublicKey],
) -> Result<(), SignatureError>
{
const ASSERT_MESSAGE: &'static [u8] = b"The number of messages, signatures, and public keys must be equal.";
assert!(signatures.len() == messages.len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once;
use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
// Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
| Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` bytes are the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes are the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
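///
/// # Example
///
/// A small sketch of the layout described above. It assumes
/// `SECRET_KEY_LENGTH` is re-exported at the crate root (via the
/// `pub use crate::constants::*;` above) and that the `as_bytes`
/// accessors used in this method's body are public.
///
/// ```
/// # extern crate rand;
/// # extern crate ed25519_dalek;
/// # use rand::rngs::OsRng;
/// # use ed25519_dalek::{Keypair, SECRET_KEY_LENGTH};
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
/// let bytes = keypair.to_bytes();
/// // Secret half first, public half second.
/// assert_eq!(&bytes[..SECRET_KEY_LENGTH], &keypair.secret.as_bytes()[..]);
/// assert_eq!(&bytes[SECRET_KEY_LENGTH..], &keypair.public.as_bytes()[..]);
/// # }
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```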
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose `Ok` value is an EdDSA `Keypair` or whose `Err` value
/// is a `SignatureError` describing the error that occurred.
pub fn from_bytes<'a>(bytes: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair { secret, public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair { public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
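///
/// # Example
///
/// A minimal sign/verify round trip (a sketch; the message is arbitrary):
///
/// ```
/// # extern crate rand;
/// # extern crate ed25519_dalek;
/// # use rand::rngs::OsRng;
/// # use ed25519_dalek::{Keypair, Signature};
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
/// let signature: Signature = keypair.sign(message);
/// // Verification succeeds for the signed message...
/// assert!(keypair.verify(message, &signature).is_ok());
/// // ...and fails for any other message.
/// assert!(keypair.verify(b"a different message", &signature).is_err());
/// # }
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```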
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// If you want, you can optionally pass a "context". It is generally a
/// good idea to choose a context and try to make it unique to your project
/// and this specific usage of signatures.
///
/// For example, without this, if you were to [convert your OpenPGP key
/// to a Bitcoin key][terrible_idea] (just as an example, and also Don't
/// Ever Do That) and someone tricked you into signing an "email" which was
/// actually a Bitcoin transaction moving all your magic internet money to
/// their address, it'd be a valid transaction.
///
/// By adding a context, this trick becomes impossible, because the context
/// is concatenated into the hash, which is then signed. So, going with the
/// previous example, if your bitcoin wallet used a context of
/// "BitcoinWalletAppTxnSigning" and OpenPGP used a context (this is likely
/// the least of their safety problems) of "GPGsCryptoIsntConstantTimeLol",
/// then the signatures produced by both could never match the other, even
/// if they signed the exact same message with the same key.
///
/// Let's add a context for good measure (remember, you'll want to choose
/// your own!):
///
/// ```
/// # extern crate ed25519_dalek;
/// # extern crate rand;
/// #
/// # use ed25519_dalek::Digest;
/// # use ed25519_dalek::Keypair;
/// # use ed25519_dalek::Signature;
/// # use ed25519_dalek::Sha512;
/// # use rand::thread_rng;
/// #
/// # #[cfg(feature = "std")]
/// # fn main() {
/// # let mut csprng = thread_rng();
/// # let keypair: Keypair = Keypair::generate(&mut csprng);
/// # let message: &[u8] = b"All I want is to pet all of the dogs.";
/// # let mut prehashed: Sha512 = Sha512::new();
/// # prehashed.input(message);
/// #
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
/// [terrible_idea]: https://github.com/isislovecruft/scripts/blob/master/gpgkey2bc.py
pub fn sign_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&'static [u8]>,
) -> Signature
where
D: Digest<OutputSize = U64>,
{
let expanded: ExpandedSecretKey = (&self.secret).into(); // xxx thanks i hate this
expanded.sign_prehashed(prehashed_message, &self.public, context)
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify(
&self,
message: &[u8],
signature: &Signature
) -> Result<(), SignatureError>
{
self.public.verify(message, signature)
}
/// Verify a `signature` on a `prehashed_message` using the Ed25519ph algorithm.
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
/// * `signature` is a purported Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Returns
///
/// Returns `Ok(())` if the `signature` is a valid signature created by this
/// `Keypair` on the `prehashed_message`, and a `SignatureError` otherwise.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
/// use ed25519_dalek::Sha512;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// let mut prehashed: Sha512 = Sha512::default();
/// prehashed.input(message);
///
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
///
/// // The sha2::Sha512 struct doesn't implement Copy, so we'll have to create a new one:
/// let mut prehashed_again: Sha512 = Sha512::default();
/// prehashed_again.input(message);
///
/// let verified = keypair.public.verify_prehashed(prehashed_again, Some(context), &sig);
///
/// assert!(verified.is_ok());
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
pub fn verify_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&[u8]>,
signature: &Signature,
) -> Result<(), SignatureError>
where
D: Digest<OutputSize = U64>,
{
self.public.verify_prehashed(prehashed_message, context, signature)
}
}
#[cfg(feature = "serde")]
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self.to_bytes()[..])
}
}
#[cfg(feature = "serde")]
impl<'d> Deserialize<'d> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'d>,
{
struct KeypairVisitor;
impl<'d> Visitor<'d> for KeypairVisitor {
type Value = Keypair;
fn expecting(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
formatter.write_str("An ed25519 keypair, 64 bytes in total where the secret key is \
the first 32 bytes and is in unexpanded form, and the second \
32 bytes is a compressed point for a public key.")
}
fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Keypair, E>
where
E: SerdeError,
{
// Guard the slice indexing below: without this check a short input
// would panic instead of returning a deserialization error.
if bytes.len() != KEYPAIR_LENGTH {
return Err(SerdeError::invalid_length(bytes.len(), &self));
}
let secret_key = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH]);
let public_key = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..]);
match (secret_key, public_key) {
(Ok(secret), Ok(public)) => Ok(Keypair { secret, public }),
_ => Err(SerdeError::invalid_length(bytes.len(), &self)),
}
}
}
deserializer.deserialize_bytes(KeypairVisitor)
}
}
#[cfg(test)]
mod test {
use super::*;
use clear_on_drop::clear::Clear;
#[test]
fn keypair_clear_on_drop() {
let mut keypair: Keypair = Keypair::from_bytes(&[1u8; KEYPAIR_LENGTH][..]).unwrap();
keypair.clear();
fn as_bytes<T>(x: &T) -> &[u8] {
use std::mem;
use std::slice;
unsafe { slice::from_raw_parts(x as *const T as *const u8, mem::size_of_val(x)) }
}
assert!(!as_bytes(&keypair).contains(&0x15));
}
}
| Ok(())
} else {
| conditional_block |
lib.rs | //! Thread stack traces of remote processes.
//!
//! `rstack` (named after Java's `jstack`) uses [libunwind]'s ptrace interface to capture stack
//! traces of the threads of a remote process. It currently only supports Linux with a kernel
//! version of 3.4 or higher, and requires that the `/proc` pseudo-filesystem be mounted and
//! accessible.
//!
//! [libunwind]: http://www.nongnu.org/libunwind/
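//!
//! # Example
//!
//! A minimal sketch of tracing a process by PID. The PID below is a
//! placeholder; in practice the caller must have permission to ptrace the
//! target (typically the same UID, or `CAP_SYS_PTRACE`).
//!
//! ```no_run
//! extern crate rstack;
//!
//! fn main() {
//!     let pid: u32 = 1234; // hypothetical target process
//!     let process = rstack::trace(pid).expect("failed to trace process");
//!     for thread in process.threads() {
//!         println!("thread {} ({:?}):", thread.id(), thread.name());
//!         for frame in thread.frames() {
//!             println!("  {:#x}", frame.ip());
//!         }
//!     }
//! }
//! ```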
#![doc(html_root_url = "https://sfackler.github.io/rstack/doc")]
#![warn(missing_docs)]
extern crate libc;
extern crate unwind;
#[macro_use]
extern crate log;
use libc::{c_void, pid_t, ptrace, waitpid, ESRCH, PTRACE_DETACH, PTRACE_INTERRUPT, PTRACE_SEIZE,
WIFSTOPPED, __WALL};
use std::borrow::Borrow;
use std::result;
use std::io::{self, Read};
use std::fmt;
use std::fs::{self, File};
use std::error;
use std::collections::BTreeSet;
use std::ptr;
use unwind::{Accessors, AddressSpace, Byteorder, Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
| }
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
}
}
}
impl Borrow<u32> for TracedThread {
fn borrow(&self) -> &u32 {
&self.0
}
}
impl TracedThread {
fn new(pid: u32) -> io::Result<TracedThread> {
unsafe {
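// PTRACE_SEIZE attaches without stopping the thread; the separate
// PTRACE_INTERRUPT below requests the stop that the waitpid loop
// then waits to observe.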
let ret = ptrace(
PTRACE_SEIZE,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let thread = TracedThread(pid);
let ret = ptrace(
PTRACE_INTERRUPT,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let mut status = 0;
while waitpid(pid as pid_t, &mut status, __WALL) < 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::Interrupted {
return Err(e);
}
}
if !WIFSTOPPED(status) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("unexpected wait status {}", status),
));
}
Ok(thread)
}
}
fn dump(
&self,
space: &AddressSpace<PTraceStateRef>,
options: &TraceOptions,
) -> unwind::Result<Vec<Frame>> {
let state = PTraceState::new(self.0)?;
let mut cursor = Cursor::remote(&space, &state)?;
let mut trace = vec![];
loop {
let ip = cursor.register(RegNum::IP)? as usize;
let is_signal = cursor.is_signal_frame().ok();
let name = if options.procedure_names {
cursor.procedure_name().ok().map(|n| {
ProcedureName {
name: n.name().to_string(),
offset: n.offset() as usize,
}
})
} else {
None
};
let info = if options.procedure_info {
cursor.procedure_info().ok().map(|i| {
ProcedureInfo {
start_ip: i.start_ip() as usize,
end_ip: i.end_ip() as usize,
}
})
} else {
None
};
trace.push(Frame {
ip,
is_signal,
name,
info,
});
if !cursor.step()? {
break;
}
}
Ok(trace)
}
} | #[derive(Debug)]
enum ErrorInner {
Io(io::Error),
Unwind(unwind::Error), | random_line_split |
lib.rs | //! Thread stack traces of remote processes.
//!
//! `rstack` (named after Java's `jstack`) uses [libunwind]'s ptrace interface to capture stack
//! traces of the threads of a remote process. It currently only supports Linux with a kernel
//! version of 3.4 or higher, and requires that the `/proc` pseudo-filesystem be mounted and
//! accessible.
//!
//! [libunwind]: http://www.nongnu.org/libunwind/
#![doc(html_root_url = "https://sfackler.github.io/rstack/doc")]
#![warn(missing_docs)]
extern crate libc;
extern crate unwind;
#[macro_use]
extern crate log;
use libc::{c_void, pid_t, ptrace, waitpid, ESRCH, PTRACE_DETACH, PTRACE_INTERRUPT, PTRACE_SEIZE,
WIFSTOPPED, __WALL};
use std::borrow::Borrow;
use std::result;
use std::io::{self, Read};
use std::fmt;
use std::fs::{self, File};
use std::error;
use std::collections::BTreeSet;
use std::ptr;
use unwind::{Accessors, AddressSpace, Byteorder, Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
enum | {
Io(io::Error),
Unwind(unwind::Error),
}
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
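///
/// A sketch of a selective trace that records only thread names (the PID
/// below is a placeholder):
///
/// ```no_run
/// use rstack::TraceOptions;
///
/// let process = TraceOptions::new()
///     .thread_names(true)
///     .trace(1234)
///     .expect("failed to trace");
/// for thread in process.threads() {
///     println!("{}: {:?}", thread.id(), thread.name());
/// }
/// ```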
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
}
}
}
impl Borrow<u32> for TracedThread {
fn borrow(&self) -> &u32 {
&self.0
}
}
impl TracedThread {
fn new(pid: u32) -> io::Result<TracedThread> {
unsafe {
let ret = ptrace(
PTRACE_SEIZE,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let thread = TracedThread(pid);
let ret = ptrace(
PTRACE_INTERRUPT,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let mut status = 0;
while waitpid(pid as pid_t, &mut status, __WALL) < 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::Interrupted {
return Err(e);
}
}
if !WIFSTOPPED(status) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("unexpected wait status {}", status),
));
}
Ok(thread)
}
}
fn dump(
&self,
space: &AddressSpace<PTraceStateRef>,
options: &TraceOptions,
) -> unwind::Result<Vec<Frame>> {
let state = PTraceState::new(self.0)?;
let mut cursor = Cursor::remote(&space, &state)?;
let mut trace = vec![];
loop {
let ip = cursor.register(RegNum::IP)? as usize;
let is_signal = cursor.is_signal_frame().ok();
let name = if options.procedure_names {
cursor.procedure_name().ok().map(|n| {
ProcedureName {
name: n.name().to_string(),
offset: n.offset() as usize,
}
})
} else {
None
};
let info = if options.procedure_info {
cursor.procedure_info().ok().map(|i| {
ProcedureInfo {
start_ip: i.start_ip() as usize,
end_ip: i.end_ip() as usize,
}
})
} else {
None
};
trace.push(Frame {
ip,
is_signal,
name,
info,
});
if !cursor.step()? {
break;
}
}
Ok(trace)
}
}
| ErrorInner | identifier_name |
lib.rs | //! Thread stack traces of remote processes.
//!
//! `rstack` (named after Java's `jstack`) uses [libunwind]'s ptrace interface to capture stack
//! traces of the threads of a remote process. It currently only supports Linux with a kernel
//! version of 3.4 or higher, and requires that the `/proc` pseudo-filesystem be mounted and
//! accessible.
//!
//! [libunwind]: http://www.nongnu.org/libunwind/
#![doc(html_root_url = "https://sfackler.github.io/rstack/doc")]
#![warn(missing_docs)]
extern crate libc;
extern crate unwind;
#[macro_use]
extern crate log;
use libc::{c_void, pid_t, ptrace, waitpid, ESRCH, PTRACE_DETACH, PTRACE_INTERRUPT, PTRACE_SEIZE,
WIFSTOPPED, __WALL};
use std::borrow::Borrow;
use std::result;
use std::io::{self, Read};
use std::fmt;
use std::fs::{self, File};
use std::error;
use std::collections::BTreeSet;
use std::ptr;
use unwind::{Accessors, AddressSpace, Byteorder, Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
enum ErrorInner {
Io(io::Error),
Unwind(unwind::Error),
}
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
}
}
}
impl Borrow<u32> for TracedThread {
fn borrow(&self) -> &u32 {
&self.0
}
}
impl TracedThread {
fn new(pid: u32) -> io::Result<TracedThread> {
unsafe {
let ret = ptrace(
PTRACE_SEIZE,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let thread = TracedThread(pid);
let ret = ptrace(
PTRACE_INTERRUPT,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let mut status = 0;
while waitpid(pid as pid_t, &mut status, __WALL) < 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::Interrupted {
return Err(e);
}
}
if !WIFSTOPPED(status) |
Ok(thread)
}
}
fn dump(
&self,
space: &AddressSpace<PTraceStateRef>,
options: &TraceOptions,
) -> unwind::Result<Vec<Frame>> {
let state = PTraceState::new(self.0)?;
let mut cursor = Cursor::remote(&space, &state)?;
let mut trace = vec![];
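// Walk the stack from the innermost frame outward; `Cursor::step`
// reports false once the outermost frame has been reached.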
loop {
let ip = cursor.register(RegNum::IP)? as usize;
let is_signal = cursor.is_signal_frame().ok();
let name = if options.procedure_names {
cursor.procedure_name().ok().map(|n| {
ProcedureName {
name: n.name().to_string(),
offset: n.offset() as usize,
}
})
} else {
None
};
let info = if options.procedure_info {
cursor.procedure_info().ok().map(|i| {
ProcedureInfo {
start_ip: i.start_ip() as usize,
end_ip: i.end_ip() as usize,
}
})
} else {
None
};
trace.push(Frame {
ip,
is_signal,
name,
info,
});
if !cursor.step()? {
break;
}
}
Ok(trace)
}
}
| {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("unexpected wait status {}", status),
));
} | conditional_block |
lib.rs | //! Thread stack traces of remote processes.
//!
//! `rstack` (named after Java's `jstack`) uses [libunwind]'s ptrace interface to capture stack
//! traces of the threads of a remote process. It currently only supports Linux with a kernel
//! version of 3.4 or higher, and requires that the `/proc` pseudo-filesystem be mounted and
//! accessible.
//!
//! [libunwind]: http://www.nongnu.org/libunwind/
#![doc(html_root_url = "https://sfackler.github.io/rstack/doc")]
#![warn(missing_docs)]
extern crate libc;
extern crate unwind;
#[macro_use]
extern crate log;
use libc::{c_void, pid_t, ptrace, waitpid, ESRCH, PTRACE_DETACH, PTRACE_INTERRUPT, PTRACE_SEIZE,
WIFSTOPPED, __WALL};
use std::borrow::Borrow;
use std::result;
use std::io::{self, Read};
use std::fmt;
use std::fs::{self, File};
use std::error;
use std::collections::BTreeSet;
use std::ptr;
use unwind::{Accessors, AddressSpace, Byteorder, Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
enum ErrorInner {
Io(io::Error),
Unwind(unwind::Error),
}
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
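///
/// A sketch of printing a backtrace for every thread (placeholder PID; `ignore`d since
/// it needs `ptrace` permission on a live process):
///
/// ```ignore
/// let process = trace(1234)?;
/// for thread in process.threads() {
///     println!("thread {} {:?}:", thread.id(), thread.name());
///     for frame in thread.frames() {
///         match frame.name() {
///             Some(name) => println!("  {:#x} {} + {:#x}", frame.ip(), name.name(), name.offset()),
///             None => println!("  {:#x} <unknown>", frame.ip()),
///         }
///     }
/// }
/// ```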
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
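// RAII handle for a thread we have attached to: `new` seizes and interrupts the thread,
// and `Drop` detaches it, so an attached thread is released even on early returns.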
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
}
}
}
impl Borrow<u32> for TracedThread {
fn borrow(&self) -> &u32 {
&self.0
}
}
impl TracedThread {
fn new(pid: u32) -> io::Result<TracedThread> {
unsafe {
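// Attach without stopping the thread (PTRACE_SEIZE), then request a stop
// (PTRACE_INTERRUPT) and wait for the stop to be reported before inspecting it.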
let ret = ptrace(
PTRACE_SEIZE,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let thread = TracedThread(pid);
let ret = ptrace(
PTRACE_INTERRUPT,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let mut status = 0;
while waitpid(pid as pid_t, &mut status, __WALL) < 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::Interrupted {
return Err(e);
}
}
if !WIFSTOPPED(status) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("unexpected wait status {}", status),
));
}
Ok(thread)
}
}
fn dump(
&self,
space: &AddressSpace<PTraceStateRef>,
options: &TraceOptions,
) -> unwind::Result<Vec<Frame>> {
let state = PTraceState::new(self.0)?;
let mut cursor = Cursor::remote(&space, &state)?;
let mut trace = vec![];
loop {
let ip = cursor.register(RegNum::IP)? as usize;
let is_signal = cursor.is_signal_frame().ok();
let name = if options.procedure_names {
cursor.procedure_name().ok().map(|n| {
ProcedureName {
name: n.name().to_string(),
offset: n.offset() as usize,
}
})
} else {
None
};
let info = if options.procedure_info {
cursor.procedure_info().ok().map(|i| {
ProcedureInfo {
start_ip: i.start_ip() as usize,
end_ip: i.end_ip() as usize,
}
})
} else {
None
};
trace.push(Frame {
ip,
is_signal,
name,
info,
});
if !cursor.step()? {
break;
}
}
Ok(trace)
}
}
// lib.rs
#![deny(missing_docs, intra_doc_link_resolution_failure)]
//! Topological functions execute within a context unique to the path in the runtime call
//! graph of other topological functions preceding the current activation record.
//!
//! Defining a topological function results in a macro definition for binding the topological
//! function to each callsite where it is invoked.
//!
//! Define a topological function with the `topo::bound` attribute:
//!
//! ```
//! #[topo::bound]
//! fn basic_topo() -> topo::Id { topo::Id::current() }
//!
//! #[topo::bound]
//! fn tier_two() -> topo::Id { basic_topo!() }
//!
//! // each of these functions will be run in separately identified
//! // contexts as the source locations for their calls are different
//! let first = basic_topo!();
//! let second = basic_topo!();
//! assert_ne!(first, second);
//!
//! let third = tier_two!();
//! let fourth = tier_two!();
//! assert_ne!(third, fourth);
//! assert_ne!(first, third);
//! assert_ne!(first, fourth);
//! assert_ne!(second, fourth);
//! ```
//!
//! Because topological functions must be sensitive to the location at which they're invoked and
//! bound to their parent, we transform the function definition into a macro so we can link
//! the two activation records inside macro expansion. See the docs for the attribute for more
//! detail and further discussion of the tradeoffs.
//!
//! TODO include diagram of topology
//!
//! TODO discuss creation of tree from "abstract stack frames" represented by topological
//! invocations
//!
//! TODO discuss propagating environment values down the topological call tree
//!
//! TODO show example of a rendering loop
//!
pub use topo_macro::bound;
use {
owning_ref::OwningRef,
std::{
any::{Any, TypeId},
cell::RefCell,
collections::{hash_map::DefaultHasher, HashMap as Map},
hash::{Hash, Hasher},
mem::replace,
ops::Deref,
rc::Rc,
},
};
/// Calls the provided expression within an [`Env`] bound to the callsite, optionally passing
/// an environment to the child scope.
///
/// ```
/// let prev = topo::Id::current();
/// topo::call!(assert_ne!(prev, topo::Id::current()));
/// ```
///
/// Adding an `env! { ... }` directive to the macro input will take ownership of provided values
/// and make them available to the code run in the `Point` created by the invocation.
///
/// ```
/// # use topo;
/// #[derive(Debug, Eq, PartialEq)]
/// struct Submarine(usize);
///
/// assert!(topo::Env::get::<Submarine>().is_none());
///
/// topo::call!({
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
///
/// topo::call!({
/// assert_eq!(&Submarine(2), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(2),
/// });
///
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(1),
/// });
///
/// assert!(topo::Env::get::<Submarine>().is_none());
/// ```
#[macro_export]
macro_rules! call {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: false, call: $($input)*)
}}
}
/// Roots a topology at a particular callsite while calling the provided expression with the same
/// convention as [`call`].
///
/// Normally, when a topological function is repeatedly bound to the same callsite in a loop,
/// each invocation receives a different [`Id`], as these invocations model siblings in the
/// topology. The overall goal of this crate, however, is to provide imperative codepaths with
/// stable identifiers *across* executions at the same callsite. In practice, we must have a root
/// to the subtopology we are maintaining across these impure calls, and after each execution of the
/// subtopology it must reset the state at its [`Id`] so that the next execution of the root
/// is bound to the same point at its parent as its previous execution was. This is...an opaque
/// explanation at best and TODO revise it.
///
/// In this first example, a scope containing the loop can observe each separate loop
/// iteration mutating `count` and the root closure mutating `exit`. The variables `root_ids` and
/// `child_ids` observe the identifiers of the root scope and of the child scopes created
/// inside the loop, respectively.
///
/// ```
/// # use topo::{self, *};
/// # use std::collections::{HashMap, HashSet};
/// struct LoopCount(usize);
///
/// let mut count = 0;
/// let mut exit = false;
/// let mut root_ids = HashSet::new();
/// let mut child_ids = HashMap::new();
/// while !exit {
/// count += 1;
/// topo::root!({
/// root_ids.insert(topo::Id::current());
/// assert_eq!(
/// root_ids.len(),
/// 1,
/// "the Id of this scope should be repeated, not incremented"
/// );
///
/// let outer_count = topo::Env::get::<LoopCount>().unwrap().0;
/// assert!(outer_count <= 10);
/// if outer_count == 10 {
/// exit = true;
/// }
///
/// for i in 0..10 {
/// topo::call!({
/// let current_id = topo::Id::current();
/// if outer_count > 1 {
/// assert_eq!(child_ids[&i], current_id);
/// }
/// child_ids.insert(i, current_id);
/// assert!(
/// child_ids.len() <= 10,
/// "only 10 children should be observed across all loop iterations",
/// );
/// });
/// }
/// assert_eq!(child_ids.len(), 10);
/// }, env! {
/// LoopCount => LoopCount(count),
/// });
/// assert_eq!(child_ids.len(), 10);
/// assert_eq!(root_ids.len(), 1);
/// }
/// ```
#[macro_export]
macro_rules! root {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: true, call: $($input)*)
}}
}
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_raw_call {
(is_root: $is_root:expr, call: $inner:expr $(, env! { $($env:tt)* })?) => {{
struct UwuDaddyRustcGibUniqueTypeIdPlsPls; // thanks for the great name idea, cjm00!
#[allow(unused_mut)]
let mut _new_env = Default::default();
$( _new_env = $crate::env! { $($env)* }; )?
let _reset_to_parent_on_drop_pls = $crate::Point::unstable_pin_prev_enter_child(
std::any::TypeId::of::<UwuDaddyRustcGibUniqueTypeIdPlsPls>(),
_new_env,
$is_root
);
$inner
}};
}
/// Identifies an activation record in the call topology. This is implemented approximately similar
/// to the [hash cons][cons] of preceding topological function invocations' `Id`s.
///
/// TODO explore analogies to instruction and stack pointers?
/// TODO explore more efficient implementations by piggybacking on those?
///
/// [cons]: https://en.wikipedia.org/wiki/Hash_consing
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Id(u64);
impl Id {
/// Returns the `Id` for the current scope in the call topology.
pub fn current() -> Self {
fn assert_send_and_sync<T>()
where
T: Send + Sync,
{
}
assert_send_and_sync::<Id>();
CURRENT_POINT.with(|p| p.borrow().id)
}
}
/// The root of a sub-graph within the overall topology formed at runtime by the call-graph of
/// topological functions.
///
/// The current `Point` contains the local [`Env`], [`Id`], and some additional internal state to
/// uniquely identify each child topological function invocations.
#[derive(Debug)]
pub struct Point {
id: Id,
state: State,
}
thread_local! {
/// The `Point` representing the current dynamic scope.
static CURRENT_POINT: RefCell<Point> = Default::default();
}
impl Point {
/// "Root" a new child [`Point`]. When the guard returned from this function is dropped, the
/// parent point is restored as the "current" `Point`. By calling provided code while the
/// returned guard is live on the stack, we create the tree of indices and environments that
/// correspond to the topological call tree, exiting the child context when the rooted scope
/// ends.
#[doc(hidden)]
pub fn unstable_pin_prev_enter_child(
callsite_ty: TypeId,
add_env: EnvInner,
reset_on_drop: bool,
) -> impl Drop {
CURRENT_POINT.with(|parent| {
let mut parent = parent.borrow_mut();
// this must be copied *before* creating the child below, which will mutate the state
let parent_initial_state = parent.state.clone();
let child = if reset_on_drop {
let mut root = Point::default();
root.state = root.state.child(Callsite::new(callsite_ty, &None), add_env);
root
} else {
parent.child(callsite_ty, add_env)
};
let parent = replace(&mut *parent, child);
scopeguard::guard(
(parent_initial_state, parent),
move |(prev_initial_state, mut prev)| {
if reset_on_drop {
prev.state = prev_initial_state;
}
CURRENT_POINT.with(|p| p.replace(prev));
},
)
})
}
/// Mark a child Point in the topology.
fn child(&mut self, callsite_ty: TypeId, additional: EnvInner) -> Self {
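// A child's `Id` hashes together the parent's `Id`, the parent's running child count,
// and the callsite, keeping identifiers stable across runs yet unique across siblings.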
let callsite = Callsite::new(callsite_ty, &self.state.last_child);
let mut hasher = DefaultHasher::new();
self.id.hash(&mut hasher);
self.state.child_count.hash(&mut hasher);
callsite.hash(&mut hasher);
let id = Id(hasher.finish());
Self {
id,
state: self.state.child(callsite, additional),
}
}
/// Runs the provided closure with access to the current [`Point`].
fn with_current<Out>(op: impl FnOnce(&Point) -> Out) -> Out {
CURRENT_POINT.with(|p| op(&*p.borrow()))
}
}
impl Default for Point {
fn default() -> Self {
Self {
id: Id(0),
state: Default::default(),
}
}
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
#[derive(Clone, Debug, Default)]
struct State {
/// The callsite most recently bound to this one as a child.
last_child: Option<Callsite>,
/// The number of children currently bound to this `Point`.
child_count: u16,
/// The current environment.
env: Env,
}
impl State {
fn child(&mut self, callsite: Callsite, additional: EnvInner) -> Self {
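// Record this callsite on the parent and bump its sibling counter; the child starts
// with fresh counters and an environment extended by `additional`.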
self.last_child = Some(callsite);
self.child_count += 1;
Self {
last_child: None,
child_count: 0,
env: self.env.child(additional),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Callsite {
ty: TypeId,
count: usize,
}
impl Callsite {
fn new(ty: TypeId, last_child: &Option<Callsite>) -> Self {
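// Repeated bindings of the same callsite under one parent (e.g. in a loop) receive an
// incrementing count, which is what distinguishes sibling scopes from one another.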
let prev_count = match last_child {
Some(ref prev) if prev.ty == ty => prev.count,
_ => 0,
};
Self {
ty,
count: prev_count + 1,
}
}
}
/// Immutable environment container for the current (sub)topology. Environment values can be
/// provided by parent topological invocations (currently just with [`call`] and
/// [`root`]), but child functions can only mutate their environment through interior
/// mutability.
///
/// The environment is type-indexed/type-directed, and each `Env` holds 0-1 instances
/// of every [`std::any::Any`]` + 'static` type. Access is provided through read-only references.
///
/// Aside: one interesting implication of the above is the ability to define "private scoped global
/// values" which are private to functions which are nonetheless propagating the values with
/// their control flow. This can be useful for runtimes to offer themselves execution-local values
/// in functions which are invoked by external code. It can also be severely abused, like any
/// implicit state, and should be used with caution.
#[derive(Clone, Debug, Default)]
pub struct Env {
inner: Rc<EnvInner>,
}
type EnvInner = Map<TypeId, Rc<dyn Any>>;
impl Env {
/// Returns a reference to a value in the current environment if it has been added to the
/// environment by parent/enclosing [`call`] invocations.
pub fn get<E>() -> Option<impl Deref<Target = E> + 'static>
where
E: Any + 'static,
|
/// Returns a reference to a value in the current environment, as [`Env::get`] does, but panics
/// if the value has not been set in the environment.
// TODO typename for debugging here would be v. nice
pub fn expect<E>() -> impl Deref<Target = E> + 'static
where
E: Any + 'static,
{
Self::get().expect("expected a value from the environment, found none")
}
fn child(&self, additional: EnvInner) -> Env {
let mut new: EnvInner = (*self.inner).to_owned();
new.extend(additional.iter().map(|(t, v)| (*t, v.clone())));
Env {
inner: Rc::new(new),
}
}
}
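// Behavior sketch (illustrative values): `child` clones the parent's map and
// overlays the additions, so the nearest provider of a given TypeId wins and
// everything else is inherited. Roughly:
//
// parent env: { u8 => 1, String => "a" }
// child adds: { u8 => 2 }
// child env:  { u8 => 2, String => "a" }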
/// Defines a new macro (named after the first metavariable) which calls a function (named in
/// the second metavariable) in a `Point` specific to this callsite and its parents.
///
/// As a quirk of the `macro_rules!` parser, we have to "bring our own" metavariables for the new
/// macro's args and their expansion for the wrapped function. This makes for an awkward invocation,
/// but it's only invoked from the proc macro attribute for generating topological macros.
///
/// This is used to work around procedural macro hygiene restrictions, allowing us to "generate" a
/// macro from a procedural macro without needing to enable a (as of writing) unstable feature.
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_make_topo_macro {
(
$name:ident $mangled_name:ident
match $matcher:tt
subst $pass:tt
doc ($($docs:tt)*)
) => {
$($docs)*
#[macro_export]
macro_rules! $name {
$matcher => {
topo::unstable_raw_call!(is_root: false, call: $mangled_name $pass)
};
}
};
}
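// Invocation sketch (shapes assumed from the matcher above; in practice the
// crate's proc macro attribute emits this, and `unstable_raw_call!` is
// defined elsewhere in the crate):
//
// unstable_make_topo_macro!(
//     my_fn __mangled_my_fn
//     match (($($arg:expr),*))
//     subst (($($arg),*))
//     doc (/// Calls `my_fn` within its own child `Point`.)
// );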
/// Declare additional environment values to expose to a child topological function's call tree.
#[macro_export]
macro_rules! env {
($($env_item_ty:ty => $env_item:expr,)*) => {{
use std::collections::HashMap;
#[allow(unused_mut)]
let mut new_env = HashMap::new();
$({
use std::{
any::{Any, TypeId},
rc::Rc,
};
new_env.insert(
TypeId::of::<$env_item_ty>(),
Rc::new($env_item) as Rc<dyn Any>,
);
})*
new_env
}}
}
#[cfg(test)]
mod tests {
use super::{Env, Id};
#[test]
fn one_child_in_a_loop() {
let root = Id::current();
assert_eq!(root, Id::current());
let mut prev = root;
for _ in 0..100 {
let called;
call!({
let current = Id::current();
assert_ne!(prev, current, "each Id in this loop should be unique");
prev = current;
called = true;
});
// make sure we've returned to an expected baseline
assert_eq!(root, Id::current());
assert!(called);
}
}
#[test]
fn call_env() {
let first_called;
let second_called;
let (first_byte, second_byte) = (0u8, 1u8);
call!(
{
let curr_byte: u8 = *Env::get::<u8>().unwrap();
assert_eq!(curr_byte, first_byte);
first_called = true;
call!(
{
let curr_byte: u8 = *Env::get::<u8>().unwrap();
assert_eq!(curr_byte, second_byte);
second_called = true;
},
env! {
u8 => second_byte,
}
);
assert!(second_called);
assert_eq!(curr_byte, first_byte);
},
env! {
u8 => first_byte,
}
);
assert!(first_called);
assert!(Env::get::<u8>().is_none());
}
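#[test]
fn expect_agrees_with_get() {
// A minimal sketch assuming the same `call!`/`env!` machinery as the tests
// above: `Env::expect` must agree with `Env::get` once a value is provided.
call!(
{
assert_eq!(*Env::expect::<u8>(), *Env::get::<u8>().unwrap());
},
env! {
u8 => 42u8,
}
);
}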
}
| {
Point::with_current(|current| {
current
.state
.env
.inner
.get(&TypeId::of::<E>())
.map(|guard| {
OwningRef::new(guard.to_owned()).map(|anon| anon.downcast_ref().unwrap())
})
})
} | identifier_body |
lib.rs | //! Advent of Code - Day 10 Instructions
//!
//! Balance Bots
//!
//! You come upon a factory in which many robots are zooming around handing small microchips
//! to each other.
//!
//! Upon closer examination, you notice that each bot only proceeds when it has two microchips,
//! and once it does, it gives each one to a different bot or puts it in a marked "output" bin.
//! Sometimes, bots take microchips from "input" bins, too.
//!
//! Inspecting one of the microchips, it seems like they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn | (bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get(2, 5),
Instruction::transfer(2, Receiver::Bot(1), Receiver::Bot(0)),
Instruction::get(1, 3),
Instruction::transfer(1, Receiver::Output(1), Receiver::Bot(0)),
Instruction::transfer(0, Receiver::Output(2), Receiver::Output(0)),
Instruction::get(2, 2),
];
#[test]
fn test_expected() {
let expected_outputs = hashmap! {
0 => 5,
1 => 2,
2 => 3,
};
let (bots, outputs) = process(EXAMPLE_INSTRUCTIONS).unwrap();
println!("Bots:");
for bot in bots.values() {
println!(" {:?}", bot);
}
println!("Outputs: {:?}", outputs);
assert!(outputs == expected_outputs);
assert_eq!(find_bot_handling(&bots, 5, 2).unwrap(), 2);
}
#[test]
fn test_parse() {
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
println!("Parsing '{}'; expecting {:?}", raw, parsed);
let got = raw.parse::<Instruction>().unwrap();
assert_eq!(got, *parsed);
}
}
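#[test]
fn add_value_orders_low_and_high() {
// Sketch: insertion order should not matter; the smaller chip always lands
// in `low` and the larger in `high`.
let mut bot = Bot::new(0);
bot.add_value(5).unwrap();
bot.add_value(2).unwrap();
assert!(bot.is_full());
assert_eq!(bot.low, Some(2));
assert_eq!(bot.high, Some(5));
}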
}
| transfer | identifier_name |
lib.rs | //! Advent of Code - Day 10 Instructions
//!
//! Balance Bots
//!
//! You come upon a factory in which many robots are zooming around handing small microchips
//! to each other.
//!
//! Upon closer examination, you notice that each bot only proceeds when it has two microchips,
//! and once it does, it gives each one to a different bot or puts it in a marked "output" bin.
//! Sometimes, bots take microchips from "input" bins, too.
//!
//! Inspecting one of the microchips, it seems like they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else |
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get(2, 5),
Instruction::transfer(2, Receiver::Bot(1), Receiver::Bot(0)),
Instruction::get(1, 3),
Instruction::transfer(1, Receiver::Output(1), Receiver::Bot(0)),
Instruction::transfer(0, Receiver::Output(2), Receiver::Output(0)),
Instruction::get(2, 2),
];
#[test]
fn test_expected() {
let expected_outputs = hashmap! {
0 => 5,
1 => 2,
2 => 3,
};
let (bots, outputs) = process(EXAMPLE_INSTRUCTIONS).unwrap();
println!("Bots:");
for bot in bots.values() {
println!(" {:?}", bot);
}
println!("Outputs: {:?}", outputs);
assert!(outputs == expected_outputs);
assert_eq!(find_bot_handling(&bots, 5, 2).unwrap(), 2);
}
#[test]
fn test_parse() {
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
println!("Parsing '{}'; expecting {:?}", raw, parsed);
let got = raw.parse::<Instruction>().unwrap();
assert_eq!(got, *parsed);
}
}
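#[test]
fn display_round_trips() {
// Sketch: the derived `parse_display::Display` impls should format each
// instruction back to exactly the string it was parsed from.
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
assert_eq!(parsed.to_string(), *raw);
}
}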
}
| {
Ok(())
} | conditional_block |
lib.rs | //! Advent of Code - Day 10 Instructions
//!
//! Balance Bots
//!
//! You come upon a factory in which many robots are zooming around handing small microchips
//! to each other.
//!
//! Upon closer examination, you notice that each bot only proceeds when it has two microchips,
//! and once it does, it gives each one to a different bot or puts it in a marked "output" bin.
//! Sometimes, bots take microchips from "input" bins, too.
//!
//! Inspecting one of the microchips, it seems like they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
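// One possible guard against that infinite loop (a sketch, not part of this
// solution): track how many instructions in a row were re-queued without a
// successful transfer, and return an error once that count reaches
// `instructions.len()`, since a full fruitless pass over the queue means no
// further progress is possible.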
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get(2, 5),
Instruction::transfer(2, Receiver::Bot(1), Receiver::Bot(0)),
Instruction::get(1, 3),
Instruction::transfer(1, Receiver::Output(1), Receiver::Bot(0)),
Instruction::transfer(0, Receiver::Output(2), Receiver::Output(0)),
Instruction::get(2, 2),
];
#[test]
fn test_expected() {
let expected_outputs = hashmap! {
0 => 5,
1 => 2,
2 => 3,
};
let (bots, outputs) = process(EXAMPLE_INSTRUCTIONS).unwrap();
println!("Bots:");
for bot in bots.values() {
println!(" {:?}", bot);
}
println!("Outputs: {:?}", outputs);
assert!(outputs == expected_outputs);
assert_eq!(find_bot_handling(&bots, 5, 2).unwrap(), 2);
}
#[test]
fn test_parse() {
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
println!("Parsing '{}'; expecting {:?}", raw, parsed);
let got = raw.parse::<Instruction>().unwrap(); | assert_eq!(got, *parsed);
}
}
} | random_line_split |
|
lib.rs | //! Advent of Code - Day 10 Instructions
//!
//! Balance Bots
//!
//! You come upon a factory in which many robots are zooming around handing small microchips
//! to each other.
//!
//! Upon closer examination, you notice that each bot only proceeds when it has two microchips,
//! and once it does, it gives each one to a different bot or puts it in a marked "output" bin.
//! Sometimes, bots take microchips from "input" bins, too.
//!
//! Inspecting one of the microchips, it seems like they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction |
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
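// Usage sketch (mirrors `part1` below): process the instructions first, then
// look the pair up in the resulting bots map.
//
// let (bots, _) = process(&instructions)?;
// let id = find_bot_handling(&bots, 61, 17)?;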
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get(2, 5),
Instruction::transfer(2, Receiver::Bot(1), Receiver::Bot(0)),
Instruction::get(1, 3),
Instruction::transfer(1, Receiver::Output(1), Receiver::Bot(0)),
Instruction::transfer(0, Receiver::Output(2), Receiver::Output(0)),
Instruction::get(2, 2),
];
#[test]
fn test_expected() {
let expected_outputs = hashmap! {
0 => 5,
1 => 2,
2 => 3,
};
let (bots, outputs) = process(EXAMPLE_INSTRUCTIONS).unwrap();
println!("Bots:");
for bot in bots.values() {
println!(" {:?}", bot);
}
println!("Outputs: {:?}", outputs);
assert!(outputs == expected_outputs);
assert_eq!(find_bot_handling(&bots, 5, 2).unwrap(), 2);
}
#[test]
fn test_parse() {
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
println!("Parsing '{}'; expecting {:?}", raw, parsed);
let got = raw.parse::<Instruction>().unwrap();
assert_eq!(got, *parsed);
}
}
}
| {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
} | identifier_body |
app.rs | use std::cell::RefCell;
use std::error;
use gio::{self, prelude::*};
use gtk::{self, prelude::*};
use crate::utils::*;
use crate::header_bar::*;
use crate::about_dialog::*;
#[derive(Clone)]
pub struct App {
main_window: gtk::ApplicationWindow,
pub header_bar: HeaderBar,
url_input: gtk::Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn from(v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
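// Round-trip sketch (illustrative): the switch/toggle widgets report a plain
// bool, which maps through `ToggleButtonState` into a `glib::Variant` for
// the stateful actions created below.
//
// let state: ToggleButtonState = true.into(); // ToggleButtonState::State2
// let variant: glib::Variant = state.into(); // true.to_variant()
// assert_eq!(ToggleButtonState::from(&variant), ToggleButtonState::State2);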
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block
let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false);
let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: >k::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// can't get rid of this RefCell wrapping...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) |
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: >k::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app);
});
application.add_action(&toggle_action);
// When activated, shuts down the application
let quit = gio::SimpleAction::new("quit", None);
quit.connect_activate(clone!(application => move |_action, _parameter| {
application.quit();
}));
application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]);
application.add_action(&quit);
}
pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) {
match self {
Action::Quit => app.activate_action("quit", None),
Action::About => app.activate_action("about", None),
Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()),
}
}
}
| {
eprintln!("Shutting down the whole thing");
} | identifier_body |
app.rs | use std::cell::RefCell;
use std::error;
use gio::{self, prelude::*};
use gtk::{self, prelude::*};
use crate::utils::*;
use crate::header_bar::*;
use crate::about_dialog::*;
#[derive(Clone)]
pub struct App {
main_window: gtk::ApplicationWindow,
pub header_bar: HeaderBar,
url_input: gtk::Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn from(v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
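// Usage sketch: with this helper a call site can write
//
// let verb = verb_selector.get_text(); // e.g. "GET" or "POST"
//
// instead of unwrapping `get_active_text()` by hand each time.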
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block | let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: >k::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// cant get rid of this RefCell wrapping...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) {
eprintln!("Shutting down the whole thing");
}
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: >k::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app);
});
application.add_action(&toggle_action);
// When activated, shuts down the application
let quit = gio::SimpleAction::new("quit", None);
quit.connect_activate(clone!(application => move |_action, _parameter| {
application.quit();
}));
application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]);
application.add_action(&quit);
}
pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) {
match self {
Action::Quit => app.activate_action("quit", None),
Action::About => app.activate_action("about", None),
Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()),
}
}
} | let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false); | random_line_split |
app.rs | use std::cell::RefCell;
use std::error;
use gio::{self, prelude::*};
use gtk::{self, prelude::*};
use crate::utils::*;
use crate::header_bar::*;
use crate::about_dialog::*;
#[derive(Clone)]
pub struct App {
main_window: gtk::ApplicationWindow,
pub header_bar: HeaderBar,
url_input: gtk::Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn | (v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block
let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false);
let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: &gtk::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// can't get rid of this RefCell wrapping...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) {
eprintln!("Shutting down the whole thing");
}
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: &gtk::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app);
});
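// Note the indirection: the toggled handler does not flip the action state
// itself. It looks up the default `gio::Application` and routes through
// `Action::trigger`, which ends up in `change_action_state("toggle", ..)`
// (see `trigger` below).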
application.add_action(&toggle_action);
// When activated, shuts down the application
let quit = gio::SimpleAction::new("quit", None);
quit.connect_activate(clone!(application => move |_action, _parameter| {
application.quit();
}));
application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]);
application.add_action(&quit);
}
pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) {
match self {
Action::Quit => app.activate_action("quit", None),
Action::About => app.activate_action("about", None),
Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()),
}
}
}
| from | identifier_name |
footprint_analysis.rs | , and use symbolic execution on each opcode again.
use crossbeam::queue::SegQueue;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt;
use std::io::Write;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use isla_lib::cache::{Cacheable, Cachekey};
use isla_lib::concrete::BV;
use isla_lib::config::ISAConfig;
use isla_lib::executor;
use isla_lib::executor::LocalFrame;
use isla_lib::ir::*;
use isla_lib::log;
use isla_lib::simplify::{EventReferences, Taints};
use isla_lib::smt::{Accessor, EvPath, Event, Sym};
use isla_lib::zencode;
#[derive(Debug, Serialize, Deserialize)]
pub struct Footprint {
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory write within an instruction
write_data_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory operator (read/write) address within an instruction
mem_addr_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// the address of a branch
branch_addr_taints: (Taints, bool),
/// The set of register reads (with subfield granularity)
register_reads: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes (also with subfield granularity)
register_writes: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes where the value was tainted by a memory read
register_writes_tainted: HashSet<(Name, Vec<Accessor>)>,
/// All register writes to the following registers are ignored for
/// tracking dependencies within an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any instruction with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any instruction with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String |
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
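// Sketch of the cache round-trip implied by these impls (the directory and
// opcode literal are made up for illustration):
//
//     let key = Footprintkey { opcode: "d503201f".to_string() };
//     if let Some(fp) = Footprint::from_cache(key, Path::new("/tmp/isla-cache")) {
//         // reuse `fp` instead of re-running symbolic execution
//     }
//
// `footprint_analysis` below does exactly this before queueing any tasks.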
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
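// Worked example (hypothetical AArch64 sequence):
//     0: LDXR (exclusive load)
//     1: ADD  (no memory access)
//     2: STXR (exclusive store)
// `rmw_dep(0, 2, ..)` holds, while `rmw_dep(0, 1, ..)` does not, because
// instruction 1 is not an exclusive store.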
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
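// In other words, `touched_by` forward-propagates the memory-tainted
// register writes of `instrs[from]` through each intervening instruction:
// reading a touched register taints that instruction's (non-ignored)
// writes, while overwriting a touched register without reading any touched
// one kills the taint, which is what the `touched.remove(wreg)` branch
// implements.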
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
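// Usage sketch: with `instrs` holding the concrete opcodes of one litmus
// thread and `footprints` the map produced by `footprint_analysis`, an
// address dependency from the first to the third instruction is just
// `addr_dep(0, 2, &instrs, &footprints)`.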
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[to]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
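// Note the asymmetry with `addr_dep`/`data_dep`: a control dependency is
// established by an intervening *branch* whose target address is tainted,
// so this function can return `true` in the middle of the scan but always
// falls through to `false` once `to` is reached.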
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt::Display for FootprintError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use FootprintError::*;
match self {
NoIslaFootprintFn => write!(
f,
"Footprint analysis failed. To calculate the syntactic\n\
register footprint, isla expects a sail function\n\
`isla_footprint' to be available in the model, which\n\
can be used to decode and execute an instruction"
),
SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"),
ExecutionError(msg) => write!(f, "{}", msg),
}
}
}
impl Error for FootprintError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
/// # Arguments
///
/// * `num_threads` - How many threads to use for analysing footprints
/// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test
/// * `lets` - The initial state of all top-level letbindings in the Sail specification
/// * `regs` - The initial register state
/// * `shared_state` - The state shared between all symbolic execution runs
/// * `isa_config` - The architecture specific configuration information
/// * `cache_dir` - A directory to cache footprint results
pub fn footprint_analysis<'ir, B, P>(
num_threads: usize,
thread_buckets: &[Vec<EvPath<B>>],
lets: &Bindings<'ir, B>,
regs: &Bindings<'ir, B>,
shared_state: &SharedState<B>,
isa_config: &ISAConfig<B>,
cache_dir: P,
) -> Result<HashMap<B, Footprint>, FootprintError>
where
B: BV,
P: AsRef<Path>,
{
use FootprintError::*;
let mut concrete_opcodes: HashSet<B> = HashSet::new();
let mut footprints = HashMap::new();
for thread in thread_buckets {
for path in thread {
for event in path {
match event {
Event::Instr(Val::Bits(bv)) => {
if let Some(footprint) =
Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref())
{
footprints.insert(*bv, footprint);
} else {
concrete_opcodes.insert(*bv);
}
}
Event::Instr(_) => return Err(SymbolicInstruction),
_ => (),
}
}
}
}
log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len()));
let function_id = match shared_state.symtab.get("zisla_footprint") {
Some(id) => id,
None => return Err(NoIslaFootprintFn),
};
let (args, _, instrs) =
shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!");
let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes
.iter()
.enumerate()
.map(|(i, opcode)| {
(
opcode,
LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs)
.add_lets(lets)
.add_regs(regs)
.task(i),
)
})
.unzip();
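// Each uncached opcode becomes one symbolic-execution task: the Sail
// function `isla_footprint` is applied to the opcode as its only argument,
// against the shared initial let-bindings and register state.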
let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()];
let queue = Arc::new(SegQueue::new());
let now = Instant::now();
executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector);
log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis()));
loop {
match queue.pop() {
Ok(Ok((task_id, mut events))) => {
let mut events: Vec<Event<B>> = events
.drain(..)
.rev()
// The first cycle is reserved for initialization
.skip_while(|ev| !ev.is_cycle())
.filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork())
.collect();
isla_lib::simplify::remove_unused(&mut events);
footprint_buckets[task_id].push(events)
}
// Error during execution
Ok(Err(msg)) => return Err(ExecutionError(msg)),
// Empty queue
Err(_) => break,
}
}
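// At this point `footprint_buckets[i]` holds one simplified event path per
// execution of opcode `task_opcodes[i]`. The loop below unions the taint
// information over all paths, so each footprint is an over-approximation
// across every behaviour of that instruction.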
let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum();
log!(log::VERBOSE, &format!("There are {} footprints", num_footprints));
let read_exclusives: Vec<usize> =
isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
let write_exclusives: Vec<usize> =
isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
for (i, paths) in footprint_buckets.iter().enumerate() {
let opcode = task_opcodes[i];
log!(log::VERBOSE, &format!("{:?}", opcode));
let mut footprint = Footprint::new();
for events in paths {
let evrefs = EventReferences::from_events(events);
let mut forks: Vec<Sym> = Vec::new();
for event in events {
match event {
Event::Fork(_, v, _) => forks.push(*v),
Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_reads.insert((*reg, accessor.clone()));
}
Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_writes.insert((*reg, accessor.clone()));
// If the data written to the register is tainted by a value read
// from memory record this fact.
if evrefs.value_taints(data, events).1 {
footprint.register_writes_tainted.insert((*reg, accessor.clone()));
}
}
Event::MarkReg { reg, mark } => {
if mark == "ignore_write" {
footprint.register_writes_ignored.insert(*reg);
}
}
Event::ReadMem { address, .. } => {
footprint.is_load = true;
if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
)
}
Event::WriteMem { address, data, .. } => {
footprint.is_store = true;
if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
);
evrefs.collect_value_taints(
data,
events,
&mut footprint.write_data_taints.0,
&mut footprint.write_data_taints.1,
);
}
Event::CacheOp { address, .. } => {
footprint.is_cache_op = true;
| {
format!("opcode_{}", self.opcode)
} | identifier_body |
footprint_analysis.rs | , and use symbolic execution on each opcode again.
use crossbeam::queue::SegQueue;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt;
use std::io::Write;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use isla_lib::cache::{Cacheable, Cachekey};
use isla_lib::concrete::BV;
use isla_lib::config::ISAConfig;
use isla_lib::executor;
use isla_lib::executor::LocalFrame;
use isla_lib::ir::*;
use isla_lib::log;
use isla_lib::simplify::{EventReferences, Taints};
use isla_lib::smt::{Accessor, EvPath, Event, Sym};
use isla_lib::zencode;
#[derive(Debug, Serialize, Deserialize)]
pub struct Footprint {
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory write within an instruction
write_data_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory operator (read/write) address within an instruction
mem_addr_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// the address of a branch
branch_addr_taints: (Taints, bool),
/// The set of register reads (with subfield granularity)
register_reads: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes (also with subfield granularity)
register_writes: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes where the value was tainted by a memory read
register_writes_tainted: HashSet<(Name, Vec<Accessor>)>,
/// All register writes to the following registers are ignored for
/// tracking dependencies within an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any instruction with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any instruction with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
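// Complexity note: this is a single linear scan over `instrs[from+1..to]`,
// with per-step work bounded by the sizes of the footprints involved, so
// dependency queries are cheap next to the symbolic execution that
// produced the footprints in the first place.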
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn | <B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[to]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt::Display for FootprintError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use FootprintError::*;
match self {
NoIslaFootprintFn => write!(
f,
"Footprint analysis failed. To calculate the syntactic\n\
register footprint, isla expects a sail function\n\
`isla_footprint' to be available in the model, which\n\
can be used to decode and execute an instruction"
),
SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"),
ExecutionError(msg) => write!(f, "{}", msg),
}
}
}
impl Error for FootprintError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
/// # Arguments
///
/// * `num_threads` - How many threads to use for analysing footprints
/// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test
/// * `lets` - The initial state of all top-level letbindings in the Sail specification
/// * `regs` - The initial register state
/// * `shared_state` - The state shared between all symbolic execution runs
/// * `isa_config` - The architecture specific configuration information
/// * `cache_dir` - A directory to cache footprint results
pub fn footprint_analysis<'ir, B, P>(
num_threads: usize,
thread_buckets: &[Vec<EvPath<B>>],
lets: &Bindings<'ir, B>,
regs: &Bindings<'ir, B>,
shared_state: &SharedState<B>,
isa_config: &ISAConfig<B>,
cache_dir: P,
) -> Result<HashMap<B, Footprint>, FootprintError>
where
B: BV,
P: AsRef<Path>,
{
use FootprintError::*;
let mut concrete_opcodes: HashSet<B> = HashSet::new();
let mut footprints = HashMap::new();
for thread in thread_buckets {
for path in thread {
for event in path {
match event {
Event::Instr(Val::Bits(bv)) => {
if let Some(footprint) =
Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref())
{
footprints.insert(*bv, footprint);
} else {
concrete_opcodes.insert(*bv);
}
}
Event::Instr(_) => return Err(SymbolicInstruction),
_ => (),
}
}
}
}
log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len()));
let function_id = match shared_state.symtab.get("zisla_footprint") {
Some(id) => id,
None => return Err(NoIslaFootprintFn),
};
let (args, _, instrs) =
shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!");
let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes
.iter()
.enumerate()
.map(|(i, opcode)| {
(
opcode,
LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs)
.add_lets(lets)
.add_regs(regs)
.task(i),
)
})
.unzip();
let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()];
let queue = Arc::new(SegQueue::new());
let now = Instant::now();
executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector);
log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis()));
loop {
match queue.pop() {
Ok(Ok((task_id, mut events))) => {
let mut events: Vec<Event<B>> = events
.drain(..)
.rev()
// The first cycle is reserved for initialization
.skip_while(|ev| !ev.is_cycle())
.filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork())
.collect();
isla_lib::simplify::remove_unused(&mut events);
footprint_buckets[task_id].push(events)
}
// Error during execution
Ok(Err(msg)) => return Err(ExecutionError(msg)),
// Empty queue
Err(_) => break,
}
}
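// Queue protocol: `Ok(Err(msg))` means some task failed inside the
// executor and aborts the whole analysis, while `Err(_)` just means the
// queue is empty, which is treated here as all footprint paths having been
// collected.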
let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum();
log!(log::VERBOSE, &format!("There are {} footprints", num_footprints));
let read_exclusives: Vec<usize> =
isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
let write_exclusives: Vec<usize> =
isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
for (i, paths) in footprint_buckets.iter().enumerate() {
let opcode = task_opcodes[i];
log!(log::VERBOSE, &format!("{:?}", opcode));
let mut footprint = Footprint::new();
for events in paths {
let evrefs = EventReferences::from_events(events);
let mut forks: Vec<Sym> = Vec::new();
for event in events {
match event {
Event::Fork(_, v, _) => forks.push(*v),
Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_reads.insert((*reg, accessor.clone()));
}
Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_writes.insert((*reg, accessor.clone()));
// If the data written to the register is tainted by a value read
// from memory record this fact.
if evrefs.value_taints(data, events).1 {
footprint.register_writes_tainted.insert((*reg, accessor.clone()));
}
}
Event::MarkReg { reg, mark } => {
if mark == "ignore_write" {
footprint.register_writes_ignored.insert(*reg);
}
}
Event::ReadMem { address, .. } => {
footprint.is_load = true;
if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
)
}
Event::WriteMem { address, data, .. } => {
footprint.is_store = true;
if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
);
evrefs.collect_value_taints(
data,
events,
&mut footprint.write_data_taints.0,
&mut footprint.write_data_taints.1,
);
}
Event::CacheOp { address, .. } => {
footprint.is_cache_op = true;
| data_dep | identifier_name |
footprint_analysis.rs | run, and use symbolic execution on each opcode again.
use crossbeam::queue::SegQueue;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt;
use std::io::Write;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use isla_lib::cache::{Cacheable, Cachekey};
use isla_lib::concrete::BV;
use isla_lib::config::ISAConfig;
use isla_lib::executor;
use isla_lib::executor::LocalFrame;
use isla_lib::ir::*;
use isla_lib::log;
use isla_lib::simplify::{EventReferences, Taints};
use isla_lib::smt::{Accessor, EvPath, Event, Sym};
use isla_lib::zencode;
#[derive(Debug, Serialize, Deserialize)]
pub struct Footprint {
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory write within an instruction
write_data_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory operator (read/write) address within an instruction
mem_addr_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// the address of a branch
branch_addr_taints: (Taints, bool),
/// The set of register reads (with subfield granularity)
register_reads: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes (also with subfield granularity)
register_writes: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes where the value was tainted by a memory read
register_writes_tainted: HashSet<(Name, Vec<Accessor>)>,
/// All register writes to the following registers are ignored for
/// tracking dependencies within an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any instruction with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any instruction with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
} |
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[to]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
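// Worked sketch (hypothetical AArch64 sequence):
//     0: LDR  X0, [X1]
//     1: CBNZ X0, out
//     2: STR  X2, [X3]
// The load leaves X0 in `register_writes_tainted`; if the conditional
// branch's address taint includes X0 (the usual outcome for a branch
// decided by X0), `ctrl_dep(0, 2, ..)` holds no matter what instruction 2
// reads or writes.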
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt::Display for FootprintError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use FootprintError::*;
match self {
NoIslaFootprintFn => write!(
f,
"Footprint analysis failed. To calculate the syntactic\n\
register footprint, isla expects a sail function\n\
`isla_footprint' to be available in the model, which\n\
can be used to decode and execute an instruction"
),
SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"),
ExecutionError(msg) => write!(f, "{}", msg),
}
}
}
impl Error for FootprintError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
/// # Arguments
///
/// * `num_threads` - How many threads to use for analysing footprints
/// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test
/// * `lets` - The initial state of all top-level letbindings in the Sail specification
/// * `regs` - The initial register state
/// * `shared_state` - The state shared between all symbolic execution runs
/// * `isa_config` - The architecture specific configuration information
/// * `cache_dir` - A directory to cache footprint results
pub fn footprint_analysis<'ir, B, P>(
num_threads: usize,
thread_buckets: &[Vec<EvPath<B>>],
lets: &Bindings<'ir, B>,
regs: &Bindings<'ir, B>,
shared_state: &SharedState<B>,
isa_config: &ISAConfig<B>,
cache_dir: P,
) -> Result<HashMap<B, Footprint>, FootprintError>
where
B: BV,
P: AsRef<Path>,
{
use FootprintError::*;
let mut concrete_opcodes: HashSet<B> = HashSet::new();
let mut footprints = HashMap::new();
for thread in thread_buckets {
for path in thread {
for event in path {
match event {
Event::Instr(Val::Bits(bv)) => {
if let Some(footprint) =
Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref())
{
footprints.insert(*bv, footprint);
} else {
concrete_opcodes.insert(*bv);
}
}
Event::Instr(_) => return Err(SymbolicInstruction),
_ => (),
}
}
}
}
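// Cache caveat (not enforced by the code): the key is only the opcode
// text, so cached footprints are valid only as long as the model and ISA
// configuration that produced them are unchanged; stale cache directories
// need to be cleared by hand when either changes.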
log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len()));
let function_id = match shared_state.symtab.get("zisla_footprint") {
Some(id) => id,
None => return Err(NoIslaFootprintFn),
};
let (args, _, instrs) =
shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!");
let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes
.iter()
.enumerate()
.map(|(i, opcode)| {
(
opcode,
LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs)
.add_lets(lets)
.add_regs(regs)
.task(i),
)
})
.unzip();
let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()];
let queue = Arc::new(SegQueue::new());
let now = Instant::now();
executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector);
log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis()));
loop {
match queue.pop() {
Ok(Ok((task_id, mut events))) => {
let mut events: Vec<Event<B>> = events
.drain(..)
.rev()
// The first cycle is reserved for initialization
.skip_while(|ev| !ev.is_cycle())
.filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork())
.collect();
isla_lib::simplify::remove_unused(&mut events);
footprint_buckets[task_id].push(events)
}
// Error during execution
Ok(Err(msg)) => return Err(ExecutionError(msg)),
// Empty queue
Err(_) => break,
}
}
let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum();
log!(log::VERBOSE, &format!("There are {} footprints", num_footprints));
let read_exclusives: Vec<usize> =
isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
let write_exclusives: Vec<usize> =
isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
for (i, paths) in footprint_buckets.iter().enumerate() {
let opcode = task_opcodes[i];
log!(log::VERBOSE, &format!("{:?}", opcode));
let mut footprint = Footprint::new();
for events in paths {
let evrefs = EventReferences::from_events(events);
let mut forks: Vec<Sym> = Vec::new();
for event in events {
match event {
Event::Fork(_, v, _) => forks.push(*v),
Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_reads.insert((*reg, accessor.clone()));
}
Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_writes.insert((*reg, accessor.clone()));
// If the data written to the register is tainted by a value read
// from memory record this fact.
if evrefs.value_taints(data, events).1 {
footprint.register_writes_tainted.insert((*reg, accessor.clone()));
}
}
Event::MarkReg { reg, mark } => {
if mark == "ignore_write" {
footprint.register_writes_ignored.insert(*reg);
}
}
Event::ReadMem { address, .. } => {
footprint.is_load = true;
if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
)
}
Event::WriteMem { address, data, .. } => {
footprint.is_store = true;
if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
);
evrefs.collect_value_taints(
data,
events,
&mut footprint.write_data_taints.0,
&mut footprint.write_data_taints.1,
);
}
Event::CacheOp { address, .. } => {
footprint.is_cache_op = true;
ev | random_line_split |
|
footprint_analysis.rs | , and use symbolic execution on each opcode again.
use crossbeam::queue::SegQueue;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt;
use std::io::Write;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use isla_lib::cache::{Cacheable, Cachekey};
use isla_lib::concrete::BV;
use isla_lib::config::ISAConfig;
use isla_lib::executor;
use isla_lib::executor::LocalFrame;
use isla_lib::ir::*;
use isla_lib::log;
use isla_lib::simplify::{EventReferences, Taints};
use isla_lib::smt::{Accessor, EvPath, Event, Sym};
use isla_lib::zencode;
#[derive(Debug, Serialize, Deserialize)]
pub struct Footprint {
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory write within an instruction
write_data_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// a memory operator (read/write) address within an instruction
mem_addr_taints: (Taints, bool),
/// Tracks which (symbolic) registers / memory reads can feed into
/// the address of a branch
branch_addr_taints: (Taints, bool),
/// The set of register reads (with subfield granularity)
register_reads: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes (also with subfield granularity)
register_writes: HashSet<(Name, Vec<Accessor>)>,
/// The set of register writes where the value was tainted by a memory read
register_writes_tainted: HashSet<(Name, Vec<Accessor>)>,
/// All register writes to the following registers are ignored for
/// tracking dependencies within an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any instruction with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any instruction with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive |
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
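// Edge case: for adjacent instructions (`to == from + 1`) the loop body
// never runs, so a dependency query reduces to intersecting the first
// instruction's `register_writes_tainted` with the relevant taint set of
// the second.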
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[to]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt::Display for FootprintError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use FootprintError::*;
match self {
NoIslaFootprintFn => write!(
f,
"Footprint analysis failed. To calculate the syntactic\n\
register footprint, isla expects a sail function\n\
`isla_footprint' to be available in the model, which\n\
can be used to decode and execute an instruction"
),
SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"),
ExecutionError(msg) => write!(f, "{}", msg),
}
}
}
impl Error for FootprintError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
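// `source` is `None` because no variant wraps another error value;
// `ExecutionError` carries the executor's message as a plain `String`.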
/// # Arguments
///
/// * `num_threads` - How many threads to use for analysing footprints
/// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test
/// * `lets` - The initial state of all top-level letbindings in the Sail specification
/// * `regs` - The initial register state
/// * `shared_state` - The state shared between all symbolic execution runs
/// * `isa_config` - The architecture specific configuration information
/// * `cache_dir` - A directory to cache footprint results
pub fn footprint_analysis<'ir, B, P>(
num_threads: usize,
thread_buckets: &[Vec<EvPath<B>>],
lets: &Bindings<'ir, B>,
regs: &Bindings<'ir, B>,
shared_state: &SharedState<B>,
isa_config: &ISAConfig<B>,
cache_dir: P,
) -> Result<HashMap<B, Footprint>, FootprintError>
where
B: BV,
P: AsRef<Path>,
{
use FootprintError::*;
let mut concrete_opcodes: HashSet<B> = HashSet::new();
let mut footprints = HashMap::new();
for thread in thread_buckets {
for path in thread {
for event in path {
match event {
Event::Instr(Val::Bits(bv)) => {
if let Some(footprint) =
Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref())
{
footprints.insert(*bv, footprint);
} else {
concrete_opcodes.insert(*bv);
}
}
Event::Instr(_) => return Err(SymbolicInstruction),
_ => (),
}
}
}
}
log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len()));
let function_id = match shared_state.symtab.get("zisla_footprint") {
Some(id) => id,
None => return Err(NoIslaFootprintFn),
};
let (args, _, instrs) =
shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!");
let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes
.iter()
.enumerate()
.map(|(i, opcode)| {
(
opcode,
LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs)
.add_lets(lets)
.add_regs(regs)
.task(i),
)
})
.unzip();
let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()];
let queue = Arc::new(SegQueue::new());
let now = Instant::now();
executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector);
log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis()));
loop {
match queue.pop() {
Ok(Ok((task_id, mut events))) => {
let mut events: Vec<Event<B>> = events
.drain(..)
.rev()
// The first cycle is reserved for initialization
.skip_while(|ev| !ev.is_cycle())
.filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork())
.collect();
isla_lib::simplify::remove_unused(&mut events);
footprint_buckets[task_id].push(events)
}
// Error during execution
Ok(Err(msg)) => return Err(ExecutionError(msg)),
// Empty queue
Err(_) => break,
}
}
let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum();
log!(log::VERBOSE, &format!("There are {} footprints", num_footprints));
let read_exclusives: Vec<usize> =
isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
let write_exclusives: Vec<usize> =
isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect();
for (i, paths) in footprint_buckets.iter().enumerate() {
let opcode = task_opcodes[i];
log!(log::VERBOSE, &format!("{:?}", opcode));
let mut footprint = Footprint::new();
for events in paths {
let evrefs = EventReferences::from_events(events);
let mut forks: Vec<Sym> = Vec::new();
for event in events {
match event {
Event::Fork(_, v, _) => forks.push(*v),
Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_reads.insert((*reg, accessor.clone()));
}
Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => {
footprint.register_writes.insert((*reg, accessor.clone()));
// If the data written to the register is tainted by a value read
// from memory record this fact.
if evrefs.value_taints(data, events).1 {
footprint.register_writes_tainted.insert((*reg, accessor.clone()));
}
}
Event::MarkReg { reg, mark } => {
if mark == "ignore_write" {
footprint.register_writes_ignored.insert(*reg);
}
}
Event::ReadMem { address, .. } => {
footprint.is_load = true;
if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
)
}
Event::WriteMem { address, data, .. } => {
footprint.is_store = true;
if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) {
footprint.is_exclusive = true;
}
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
);
evrefs.collect_value_taints(
data,
events,
&mut footprint.write_data_taints.0,
&mut footprint.write_data_taints.1,
);
}
Event::CacheOp { address, .. } => {
footprint.is_cache_op = true;
// like ReadMem above, a cache op taints its address operands
evrefs.collect_value_taints(
address,
events,
&mut footprint.mem_addr_taints.0,
&mut footprint.mem_addr_taints.1,
)
}
mod.rs
// Copyright © 2018 Cormac O'Brien
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
mod music;
pub use music::MusicPlayer;
use std::{
cell::{Cell, RefCell},
io::{self, BufReader, Cursor, Read},
};
use crate::common::vfs::{Vfs, VfsError};
use cgmath::{InnerSpace, Vector3};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, OutputStreamHandle, Sink, Source,
};
use thiserror::Error;
use chrono::Duration;
pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001;
const MAX_ENTITY_CHANNELS: usize = 128;
#[derive(Error, Debug)]
pub enum SoundError {
#[error("No such music track: {0}")]
NoSuchTrack(String),
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Virtual filesystem error: {0}")]
Vfs(#[from] VfsError),
#[error("WAV decoder error: {0}")]
Decoder(#[from] rodio::decoder::DecoderError),
}
/// Data needed for sound spatialization.
///
/// This struct is updated every frame.
#[derive(Debug)]
pub struct Listener {
origin: Cell<Vector3<f32>>,
left_ear: Cell<Vector3<f32>>,
right_ear: Cell<Vector3<f32>>,
}
impl Listener {
pub fn new() -> Listener {
Listener {
origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
}
}
pub fn origin(&self) -> Vector3<f32> {
self.origin.get()
}
pub fn left_ear(&self) -> Vector3<f32> {
self.left_ear.get()
}
pub fn right_ear(&self) -> Vector3<f32> {
self.right_ear.get()
}
pub fn set_origin(&self, new_origin: Vector3<f32>) {
self.origin.set(new_origin);
}
pub fn set_left_ear(&self, new_origin: Vector3<f32>) {
self.left_ear.set(new_origin);
}
pub fn set_right_ear(&self, new_origin: Vector3<f32>) {
self.right_ear.set(new_origin);
}
pub fn attenuate(
&self,
emitter_origin: Vector3<f32>,
base_volume: f32,
attenuation: f32,
) -> f32 {
let decay = (emitter_origin - self.origin.get()).magnitude()
* attenuation
* DISTANCE_ATTENUATION_FACTOR;
let volume = ((1.0 - decay) * base_volume).max(0.0);
volume
}
}
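// A small sanity check (an added sketch, not in the original source) for the
// linear falloff implemented by `attenuate`: the volume is
// (1.0 - distance * attenuation * DISTANCE_ATTENUATION_FACTOR) * base_volume,
// clamped at zero.
#[cfg(test)]
mod listener_tests {
    use super::*;
    use cgmath::Vector3;

    #[test]
    fn attenuate_falls_off_linearly_and_clamps() {
        let listener = Listener::new(); // listener origin is (0, 0, 0)
        // 250 units away, attenuation 1.0: (1.0 - 250.0 * 0.001) * 0.8 = 0.6
        let v = listener.attenuate(Vector3::new(250.0, 0.0, 0.0), 0.8, 1.0);
        assert!((v - 0.6).abs() < 1e-6);
        // far enough that the decay exceeds 1.0, the volume clamps to zero
        let far = listener.attenuate(Vector3::new(5000.0, 0.0, 0.0), 0.8, 1.0);
        assert_eq!(far, 0.0);
    }
}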
#[derive(Clone)]
pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>);
impl AudioSource {
pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError>
where
S: AsRef<str>,
{
let name = name.as_ref();
let full_path = "sound/".to_owned() + name;
let mut file = vfs.open(&full_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let src = Decoder::new(Cursor::new(data))?
.convert_samples()
.buffered();
Ok(AudioSource(src))
}
}
pub struct StaticSound {
origin: Vector3<f32>,
sink: RefCell<Sink>,
volume: f32,
attenuation: f32,
}
impl StaticSound {
pub fn new(
stream: &OutputStreamHandle,
origin: Vector3<f32>,
src: AudioSource,
volume: f32,
attenuation: f32,
listener: &Listener,
) -> StaticSound {
// TODO: handle PlayError once PR accepted
let sink = Sink::try_new(&stream).unwrap();
let infinite = src.0.clone().repeat_infinite();
sink.append(infinite);
sink.set_volume(listener.attenuate(origin, volume, attenuation));
StaticSound {
origin,
sink: RefCell::new(sink),
volume,
attenuation,
}
}
pub fn update(&self, listener: &Listener) {
let sink = self.sink.borrow_mut();
sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation));
}
}
/// Represents a single audio channel, capable of playing one sound at a time.
pub struct Channel {
stream: OutputStreamHandle,
sink: RefCell<Option<Sink>>,
master_vol: Cell<f32>,
attenuation: Cell<f32>,
}
impl Channel {
/// Create a new `Channel` backed by the given `Device`.
pub fn new(stream: OutputStreamHandle) -> Channel {
Channel {
stream,
sink: RefCell::new(None),
master_vol: Cell::new(0.0),
attenuation: Cell::new(0.0),
}
}
/// Play a new sound on this channel, cutting off any sound that was previously playing.
pub fn play(
&self,
src: AudioSource,
ent_pos: Vector3<f32>,
listener: &Listener,
volume: f32,
attenuation: f32,
) {
self.master_vol.set(volume);
self.attenuation.set(attenuation);
// stop the old sound
self.sink.replace(None);
// start the new sound
let new_sink = Sink::try_new(&self.stream).unwrap();
new_sink.append(src.0);
new_sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
self.sink.replace(Some(new_sink));
}
pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) {
if let Some(ref sink) = *self.sink.borrow_mut() {
// attenuate using quake coordinates since distance is the same either way
sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
};
}
/// Stop the sound currently playing on this channel, if there is one.
pub fn stop(&self) {
self.sink.replace(None);
}
/// Returns whether or not this `Channel` is currently in use.
pub fn in_use(&self) -> bool {
let replace_sink;
match *self.sink.borrow() {
Some(ref sink) => replace_sink = sink.empty(),
None => return false,
}
// if the sink isn't in use, free it
if replace_sink {
self.sink.replace(None);
false
} else {
true
}
}
}
pub struct EntityChannel {
start_time: Duration,
// if None, sound is associated with a temp entity
ent_id: Option<usize>,
ent_channel: i8,
channel: Channel,
}
impl EntityChannel {
pub fn channel(&self) -> &Channel {
&self.channel
}
pub fn entity_id(&self) -> Option<usize> {
self.ent_id
}
}
pub struct EntityMixer {
stream: OutputStreamHandle,
// TODO: replace with an array once const type parameters are implemented
channels: Box<[Option<EntityChannel>]>,
}
impl EntityMixer {
pub fn new(stream: OutputStreamHandle) -> EntityMixer {
let mut channel_vec = Vec::new();
for _ in 0..MAX_ENTITY_CHANNELS {
channel_vec.push(None);
}
EntityMixer {
stream,
channels: channel_vec.into_boxed_slice(),
}
}
fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize {
let mut oldest = 0;
for (i, channel) in self.channels.iter().enumerate() {
match *channel {
Some(ref chan) => {
// if this channel is free, return it
if !chan.channel.in_use() {
return i;
}
// replace sounds on the same entity channel
if ent_channel != 0
&& chan.ent_id == ent_id
&& (chan.ent_channel == ent_channel || ent_channel == -1)
{
return i;
}
// TODO: don't clobber player sounds with monster sounds
// keep track of which sound started the earliest
match self.channels[oldest] {
Some(ref o) => {
if chan.start_time < o.start_time {
oldest = i;
}
}
None => oldest = i,
}
}
None => return i,
}
}
// if there are no good channels, just replace the one that's been running the longest
oldest
}
pub fn start_sound(
&mut self,
src: AudioSource,
time: Duration,
ent_id: Option<usize>,
ent_channel: i8,
volume: f32,
attenuation: f32,
origin: Vector3<f32>,
listener: &Listener,
) {
let chan_id = self.find_free_channel(ent_id, ent_channel);
let new_channel = Channel::new(self.stream.clone());
new_channel.play(
src.clone(),
origin,
listener,
volume,
attenuation,
);
self.channels[chan_id] = Some(EntityChannel {
start_time: time,
ent_id,
ent_channel,
channel: new_channel,
})
}
pub fn iter_entity_channels(&self) -> impl Iterator<Item = &EntityChannel> {
self.channels.iter().filter_map(|e| e.as_ref())
}
pub fn stream(&self) -> OutputStreamHandle {
self.stream.clone()
}
}
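// Usage sketch (an addition, not part of the original source): wiring the
// mixer into a client frame. `rodio::OutputStream::try_default` matches the
// rodio API this module already uses; `vfs`, `ent_id` and `origin` are
// assumed to come from the embedding game state.
//
// let (_stream, handle) = rodio::OutputStream::try_default().unwrap();
// let mut mixer = EntityMixer::new(handle);
// let listener = Listener::new();
// let src = AudioSource::load(&vfs, "weapons/guncock.wav")?;
// mixer.start_sound(src, Duration::zero(), Some(ent_id), 0, 1.0, 1.0, origin, &listener);
// // once per frame, respatialize every live channel:
// for ec in mixer.iter_entity_channels() {
//     ec.channel().update(origin, &listener);
// }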
dropck.rs
use middle::free_region::FreeRegionMap;
use rustc::infer;
use middle::region;
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, AdtKind, Ty, TyCtxt};
use rustc::traits::{self, Reveal};
use util::nodemap::FnvHashSet;
use syntax::ast;
use syntax_pos::{self, Span};
/// check_drop_impl confirms that the Drop implementation identified by
/// `drop_impl_did` is not any more specialized than the type it is
/// attached to (Issue #8142).
///
/// This means:
///
/// 1. The self type must be nominal (this is already checked during
/// coherence),
///
/// 2. The generic region/type parameters of the impl's self-type must
/// all be parameters of the Drop impl itself (i.e. no
/// specialization like `impl Drop for Foo<i32>`), and,
///
/// 3. Any bounds on the generic parameters must be reflected in the
/// struct/enum definition for the nominal type itself (i.e.
/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
///
pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> {
let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty;
let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyAdt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(ccx,
drop_impl_did,
dtor_self_type,
adt_def.did)?;
ensure_drop_predicates_are_implied_by_item_defn(ccx,
drop_impl_did,
&dtor_predicates,
adt_def.did,
self_to_impl_substs)
}
_ => {
// Destructors only work on nominal types. This was
// already checked by coherence, so we can panic here.
let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
span_bug!(span,
"should have been rejected by coherence check: {}",
dtor_self_type);
}
}
}
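// Examples (added illustration, not rustc source) of impls that the two
// checks below reject:
//
// struct Wrapper<T>(T);
//
// // E0366: the self type is specialized to a single instantiation.
// // impl Drop for Wrapper<i32> { fn drop(&mut self) {} }
//
// // E0367: `T: Clone` appears on the impl but not on the struct definition.
// // impl<T: Clone> Drop for Wrapper<T> { fn drop(&mut self) {} }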
fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
drop_impl_ty: Ty<'tcx>,
self_type_did: DefId) -> Result<(), ()>
{
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
named_type, fresh_impl_self_ty) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, that maps the generic
// parameters of Type to that of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill, rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to by applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possible access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
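// Illustration (added, not rustc source) of the rule above: `d` is declared
// before `x`, so `x` is dropped first, yet `Droppy`'s lifetime-parametric
// destructor can still reach the borrow. Drop-Check therefore demands that
// the borrowed region strictly outlive `d`'s scope, and rejects the program.
//
// use std::cell::Cell;
// struct Droppy<'a>(&'a Cell<i32>);
// impl<'a> Drop for Droppy<'a> {
//     fn drop(&mut self) { self.0.set(0); }
// }
// fn rejected() {
//     let d;
//     let x = Cell::new(1);
//     d = Droppy(&x); // error: `x` does not live long enough
// }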
enum Error<'tcx> {
Overflow(TypeContext, ty::Ty<'tcx>),
}
#[derive(Copy, Clone)]
enum TypeContext {
Root,
ADT {
def_id: DefId,
variant: ast::Name,
field: ast::Name,
}
}
struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
span: Span,
/// the scope reachable dtorck types must outlive
parent_scope: region::CodeExtent
}
// `context` is used for reporting overflow errors
fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
if depth / 4 >= recursion_limit {
// This can get into rather deep recursion, especially in the
// presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
// use a higher recursion limit to avoid errors.
return Err(Error::Overflow(context, ty))
}
// canonicalize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - cached",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
return Ok(()); // we already visited this type
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?}",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// If `typ` has a destructor, then we must ensure that all
// borrowed data reachable via `typ` must outlive the parent
// of `scope`. This is handled below.
//
// However, there is an important special case: for any Drop
// impl that is tagged as "blind" to their parameters,
// we assume that data borrowed via such type parameters
// remains unreachable via that Drop impl.
//
// For example, consider:
//
// ```rust
// #[unsafe_destructor_blind_to_params]
// impl<T> Drop for Vec<T> {... }
// ```
//
// which does have to be able to drop instances of `T`, but
// otherwise cannot read data from `T`.
//
// Of course, for the type expression passed in for any such
// unbounded type parameter `T`, we must resume the recursive
// analysis on `T` (since it would be ignored by
// type_must_outlive).
if has_dtor_of_interest(tcx, ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} - is a dtorck type!",
(0..depth).map(|_| ' ').collect::<String>(),
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - checking interior",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// We still need to ensure all referenced data is safe.
match ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
// primitive - definitely safe
Ok(())
}
ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
// single-element containers, behave like their element
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) => {
let did = def.did;
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
TypeContext::ADT {
def_id: did,
field: field.name,
variant: variant.name,
},
fty,
depth+1)?
}
}
Ok(())
}
ty::TyTuple(tys) |
ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
Ok(())
}
ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
// these always come with a witness of liveness (references
// explicitly, pointers implicitly, parameters by the
// caller).
Ok(())
}
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
// FIXME(#26656): this type is always destruction-safe, but
// it implicitly witnesses Self: Fn, which can be false.
Ok(())
}
ty::TyInfer(..) |
dropck.rs
use middle::free_region::FreeRegionMap;
use rustc::infer;
use middle::region;
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, AdtKind, Ty, TyCtxt};
use rustc::traits::{self, Reveal};
use util::nodemap::FnvHashSet;
use syntax::ast;
use syntax_pos::{self, Span};
/// check_drop_impl confirms that the Drop implementation identified by
/// `drop_impl_did` is not any more specialized than the type it is
/// attached to (Issue #8142).
///
/// This means:
///
/// 1. The self type must be nominal (this is already checked during
/// coherence),
///
/// 2. The generic region/type parameters of the impl's self-type must
/// all be parameters of the Drop impl itself (i.e. no
/// specialization like `impl Drop for Foo<i32>`), and,
///
/// 3. Any bounds on the generic parameters must be reflected in the
/// struct/enum definition for the nominal type itself (i.e.
/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
///
pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> {
let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty;
let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyAdt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(ccx,
drop_impl_did,
dtor_self_type,
adt_def.did)?;
ensure_drop_predicates_are_implied_by_item_defn(ccx,
drop_impl_did,
&dtor_predicates,
adt_def.did,
self_to_impl_substs)
}
_ => {
// Destructors only work on nominal types. This was
// already checked by coherence, so we can panic here.
let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
span_bug!(span,
"should have been rejected by coherence check: {}",
dtor_self_type);
}
}
}
fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
drop_impl_ty: Ty<'tcx>,
self_type_did: DefId) -> Result<(), ()>
{
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, that maps the generic
// parameters of Type to that of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill, rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to be applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possibly access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
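/// ----
///
/// For illustration only (this example is an editorial addition, not
/// part of the original source), the rule rejects code such as:
///
/// ```rust
/// struct PrintOnDrop<'a>(&'a String);
/// impl<'a> Drop for PrintOnDrop<'a> {
///     fn drop(&mut self) { println!("dropping {}", self.0); }
/// }
///
/// let p;
/// {
///     let s = String::from("short-lived");
///     p = PrintOnDrop(&s); // error: `s` does not live long enough
/// } // `s` dies here, but the destructor of `p` runs later and
///   // could still observe `&s`
/// ```
///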
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
enum Error<'tcx> {
Overflow(TypeContext, ty::Ty<'tcx>),
}
#[derive(Copy, Clone)]
enum TypeContext {
Root,
ADT {
def_id: DefId,
variant: ast::Name,
field: ast::Name,
}
}
struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
span: Span,
/// the scope reachable dtorck types must outlive
parent_scope: region::CodeExtent
}
// `context` is used for reporting overflow errors
fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
if depth / 4 >= recursion_limit {
// This can get into rather deep recursion, especially in the
// presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
// use a higher recursion limit to avoid errors.
return Err(Error::Overflow(context, ty))
}
// canonicalize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - cached",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
return Ok(()); // we already visited this type
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?}",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// If `typ` has a destructor, then we must ensure that all
// borrowed data reachable via `typ` must outlive the parent
// of `scope`. This is handled below.
//
// However, there is an important special case: for any Drop
// impl that is tagged as "blind" to its parameters,
// we assume that data borrowed via such type parameters
// remains unreachable via that Drop impl.
//
// For example, consider:
//
// ```rust
// #[unsafe_destructor_blind_to_params]
// impl<T> Drop for Vec<T> { ... }
// ```
//
// which does have to be able to drop instances of `T`, but
// otherwise cannot read data from `T`.
//
// Of course, for the type expression passed in for any such
// unbounded type parameter `T`, we must resume the recursive
// analysis on `T` (since it would be ignored by
// type_must_outlive).
if has_dtor_of_interest(tcx, ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} - is a dtorck type!",
(0..depth).map(|_| ' ').collect::<String>(),
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - checking interior",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// We still need to ensure all referenced data is safe.
match ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
// primitive - definitely safe
Ok(())
}
ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
// single-element containers, behave like their element
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) => {
let did = def.did;
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
TypeContext::ADT {
def_id: did,
field: field.name,
variant: variant.name,
},
fty,
depth+1)?
}
}
Ok(())
}
ty::TyTuple(tys) |
ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
Ok(())
}
ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
// these always come with a witness of liveness (references
// explicitly, pointers implicitly, parameters by the
// caller).
Ok(())
}
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
// FIXME(#26656): this type is always destruction-safe, but
// it implicitly witnesses Self: Fn, which can be false.
Ok(())
}
ty::TyInfer(..) | {
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
| identifier_body |
dropck.rs | free_region::FreeRegionMap;
use rustc::infer;
use middle::region;
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, AdtKind, Ty, TyCtxt};
use rustc::traits::{self, Reveal};
use util::nodemap::FnvHashSet;
use syntax::ast;
use syntax_pos::{self, Span};
/// check_drop_impl confirms that the Drop implementation identified by
/// `drop_impl_did` is not any more specialized than the type it is
/// attached to (Issue #8142).
///
/// This means:
///
/// 1. The self type must be nominal (this is already checked during
/// coherence),
///
/// 2. The generic region/type parameters of the impl's self-type must
/// all be parameters of the Drop impl itself (i.e. no
/// specialization like `impl Drop for Foo<i32>`), and,
///
/// 3. Any bounds on the generic parameters must be reflected in the
/// struct/enum definition for the nominal type itself (i.e.
/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
///
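/// For illustration only (an editorial addition, not part of the
/// original source), rules 2 and 3 reject impls such as:
///
/// ```rust
/// struct S<T>(T);
/// impl Drop for S<i32> { // error[E0366]: Drop impls cannot be specialized
///     fn drop(&mut self) {}
/// }
///
/// struct U<T>(T);
/// impl<T: Clone> Drop for U<T> { // error[E0367]: `T: Clone` is only on the impl
///     fn drop(&mut self) {}
/// }
/// ```
///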
pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> {
let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty;
let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyAdt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(ccx,
drop_impl_did,
dtor_self_type,
adt_def.did)?;
ensure_drop_predicates_are_implied_by_item_defn(ccx,
drop_impl_did,
&dtor_predicates,
adt_def.did,
self_to_impl_substs)
}
_ => {
// Destructors only work on nominal types. This was
// already checked by coherence, so we can panic here.
let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
span_bug!(span,
"should have been rejected by coherence check: {}",
dtor_self_type);
}
}
}
fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
drop_impl_ty: Ty<'tcx>,
self_type_did: DefId) -> Result<(), ()>
{
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
named_type, fresh_impl_self_ty) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents, // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for Type<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, which maps the generic
// parameters of Type to those of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill, rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to be applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possibly access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
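/// ----
///
/// For illustration only (this example is an editorial addition, not
/// part of the original source), the rule rejects code such as:
///
/// ```rust
/// struct PrintOnDrop<'a>(&'a String);
/// impl<'a> Drop for PrintOnDrop<'a> {
///     fn drop(&mut self) { println!("dropping {}", self.0); }
/// }
///
/// let p;
/// {
///     let s = String::from("short-lived");
///     p = PrintOnDrop(&s); // error: `s` does not live long enough
/// } // `s` dies here, but the destructor of `p` runs later and
///   // could still observe `&s`
/// ```
///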
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
enum | <'tcx> {
Overflow(TypeContext, ty::Ty<'tcx>),
}
#[derive(Copy, Clone)]
enum TypeContext {
Root,
ADT {
def_id: DefId,
variant: ast::Name,
field: ast::Name,
}
}
struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
span: Span,
/// the scope reachable dtorck types must outlive
parent_scope: region::CodeExtent
}
// `context` is used for reporting overflow errors
fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
if depth / 4 >= recursion_limit {
// This can get into rather deep recursion, especially in the
// presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
// use a higher recursion limit to avoid errors.
return Err(Error::Overflow(context, ty))
}
// canonicalize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - cached",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
return Ok(()); // we already visited this type
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?}",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// If `typ` has a destructor, then we must ensure that all
// borrowed data reachable via `typ` must outlive the parent
// of `scope`. This is handled below.
//
// However, there is an important special case: for any Drop
// impl that is tagged as "blind" to its parameters,
// we assume that data borrowed via such type parameters
// remains unreachable via that Drop impl.
//
// For example, consider:
//
// ```rust
// #[unsafe_destructor_blind_to_params]
// impl<T> Drop for Vec<T> { ... }
// ```
//
// which does have to be able to drop instances of `T`, but
// otherwise cannot read data from `T`.
//
// Of course, for the type expression passed in for any such
// unbounded type parameter `T`, we must resume the recursive
// analysis on `T` (since it would be ignored by
// type_must_outlive).
if has_dtor_of_interest(tcx, ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} - is a dtorck type!",
(0..depth).map(|_| ' ').collect::<String>(),
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - checking interior",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// We still need to ensure all referenced data is safe.
match ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
// primitive - definitely safe
Ok(())
}
ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
// single-element containers, behave like their element
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) => {
let did = def.did;
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
TypeContext::ADT {
def_id: did,
field: field.name,
variant: variant.name,
},
fty,
depth+1)?
}
}
Ok(())
}
ty::TyTuple(tys) |
ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
Ok(())
}
ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
// these always come with a witness of liveness (references
// explicitly, pointers implicitly, parameters by the
// caller).
Ok(())
}
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
// FIXME(#26656): this type is always destruction-safe, but
// it implicitly witnesses Self: Fn, which can be false.
Ok(())
}
ty::TyInfer | Error | identifier_name |
dropck.rs | cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
///
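/// For illustration only (an editorial addition, not part of the
/// original source), the rules above reject impls such as:
///
/// ```rust
/// struct S<T>(T);
/// impl Drop for S<i32> { // error[E0366]: Drop impls cannot be specialized
///     fn drop(&mut self) {}
/// }
///
/// struct U<T>(T);
/// impl<T: Clone> Drop for U<T> { // error[E0367]: `T: Clone` is only on the impl
///     fn drop(&mut self) {}
/// }
/// ```
///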
pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> {
let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty;
let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyAdt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(ccx,
drop_impl_did,
dtor_self_type,
adt_def.did)?;
ensure_drop_predicates_are_implied_by_item_defn(ccx,
drop_impl_did,
&dtor_predicates,
adt_def.did,
self_to_impl_substs)
}
_ => {
// Destructors only work on nominal types. This was
// already checked by coherence, so we can panic here.
let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
span_bug!(span,
"should have been rejected by coherence check: {}",
dtor_self_type);
}
}
}
fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
drop_impl_ty: Ty<'tcx>,
self_type_did: DefId) -> Result<(), ()>
{
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
named_type, fresh_impl_self_ty) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents, // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for Type<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, which maps the generic
// parameters of Type to those of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill, rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to be applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possibly access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
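/// ----
///
/// For illustration only (this example is an editorial addition, not
/// part of the original source), the rule rejects code such as:
///
/// ```rust
/// struct PrintOnDrop<'a>(&'a String);
/// impl<'a> Drop for PrintOnDrop<'a> {
///     fn drop(&mut self) { println!("dropping {}", self.0); }
/// }
///
/// let p;
/// {
///     let s = String::from("short-lived");
///     p = PrintOnDrop(&s); // error: `s` does not live long enough
/// } // `s` dies here, but the destructor of `p` runs later and
///   // could still observe `&s`
/// ```
///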
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
enum Error<'tcx> {
Overflow(TypeContext, ty::Ty<'tcx>),
}
#[derive(Copy, Clone)]
enum TypeContext {
Root,
ADT {
def_id: DefId,
variant: ast::Name,
field: ast::Name,
}
}
struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
span: Span,
/// the scope reachable dtorck types must outlive
parent_scope: region::CodeExtent
}
// `context` is used for reporting overflow errors
fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
if depth / 4 >= recursion_limit {
// This can get into rather deep recursion, especially in the
// presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
// use a higher recursion limit to avoid errors.
return Err(Error::Overflow(context, ty))
}
// canonicalize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - cached",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
return Ok(()); // we already visited this type
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?}",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// If `typ` has a destructor, then we must ensure that all
// borrowed data reachable via `typ` must outlive the parent
// of `scope`. This is handled below.
//
// However, there is an important special case: for any Drop
// impl that is tagged as "blind" to its parameters,
// we assume that data borrowed via such type parameters
// remains unreachable via that Drop impl.
//
// For example, consider:
//
// ```rust
// #[unsafe_destructor_blind_to_params]
// impl<T> Drop for Vec<T> { ... }
// ```
//
// which does have to be able to drop instances of `T`, but
// otherwise cannot read data from `T`.
//
// Of course, for the type expression passed in for any such
// unbounded type parameter `T`, we must resume the recursive
// analysis on `T` (since it would be ignored by
// type_must_outlive).
if has_dtor_of_interest(tcx, ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} - is a dtorck type!",
(0..depth).map(|_| ' ').collect::<String>(),
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - checking interior",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// We still need to ensure all referenced data is safe.
match ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
// primitive - definitely safe
Ok(())
}
ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
// single-element containers, behave like their element
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) => {
let did = def.did;
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
TypeContext::ADT {
def_id: did,
field: field.name,
variant: variant.name,
},
fty,
depth+1)?
}
}
Ok(())
}
ty::TyTuple(tys) |
ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
Ok(())
}
ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
// these always come with a witness of liveness (references
// explicitly, pointers implicitly, parameters by the
// caller).
Ok(())
}
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
// FIXME(#26656): this type is always destruction-safe, but
// it implicitly witnesses Self: Fn, which can be false.
Ok(())
}
ty::TyInfer(..) | ty::TyError => {
tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck");
Ok(())
}
// these are always dtorck
ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(),
}
}
fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyAdt(def, _) => | {
def.is_dtorck(tcx)
} | conditional_block |
|
ui.rs | use failure::{bail, ensure, format_err, Error, Fallible};
use std::cell::{RefCell, RefMut};
use std::io::Read;
use std::rc::Rc;
use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE};
use crate::util::to_hex_string;
use crate::{Reader, ReaderFactory, Writer};
const ERROR_VERBOSITY: i32 = -1;
const INTERACTIVE_VERBOSITY: i32 = -1;
// User interaction interface.
pub trait UI {
// Initialization
fn set_verbosity(&mut self, verbosity: i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
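// For illustration only (hypothetical usage, an editorial addition, not
// part of the original source): callers can stay generic over any `UI`
// implementation; `confirm_overwrite` and its parameters are invented names.
//
// ```rust
// fn confirm_overwrite(ui: &dyn UI, path: &str) -> Fallible<bool> {
//     let prompt = format!("File '{}' exists. Overwrite?", path);
//     // `read_prompt_bool` returns None when not interactive; default to false.
//     Ok(ui.read_prompt_bool(0, &prompt, false)?.unwrap_or(false))
// }
// ```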
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts the input stream from this struct, returning it to the caller.
// After the returned function is called, this struct loses its input stream and, with it, the
// ability to prompt the user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
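// For illustration only (hypothetical usage, an editorial addition, not
// part of the original source): the extractor hands the input stream to
// exactly one caller; `name`, `input`, and `output` are assumed values.
//
// ```rust
// let mut ui = BasicUI::new(name, input, true, output, true);
// let make_reader = ui.input_stream_extractor();
// // `ui` can still prompt here; once the factory runs, it cannot:
// let reader = make_reader()?;
// assert!(!ui.can_read());
// ```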
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) |
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin one byte at a time and assemble the bytes into UTF-8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.append_printed_lines(PrintType::Log { verbosity }, message)
}
fn print_error(&self, err: &Error) -> Result<(), Error> {
self.append_printed_lines(PrintType::Error, &format!("{}", err))
}
fn println_interactive(&self, message: &str) -> Result<(), Error> {
self.append_printed_lines(PrintType::Interactive, message)
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
self.append_printed_lines(PrintType::Progress { verbosity, finish }, message)
}
// Read interface
fn can_read(&self) -> bool {
true
}
fn read_prompt(&self, prompt: &str) -> Result<String, Error> {
let (matcher, reply) = self
.prompt_replies
.borrow_mut()
.pop_front()
.unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt));
if let Some(matcher) = matcher {
assert!(
prompt.contains(&matcher),
"Unexpected prompt in TestUI: '{}', was looking for '{}'",
prompt,
matcher
);
}
reply
}
fn set_stdin_echo(&self, _enable: bool) {}
}
}
| {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
} | conditional_block |
ui.rs | use failure::{bail, ensure, format_err, Error, Fallible};
use std::cell::{RefCell, RefMut};
use std::io::Read;
use std::rc::Rc;
use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE};
use crate::util::to_hex_string;
use crate::{Reader, ReaderFactory, Writer};
const ERROR_VERBOSITY: i32 = -1;
const INTERACTIVE_VERBOSITY: i32 = -1;
// User interaction interface.
pub trait UI {
// Initialization
fn set_verbosity(&mut self, verbosity: i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
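// For illustration only (hypothetical usage, an editorial addition, not
// part of the original source): callers can stay generic over any `UI`
// implementation; `confirm_overwrite` and its parameters are invented names.
//
// ```rust
// fn confirm_overwrite(ui: &dyn UI, path: &str) -> Fallible<bool> {
//     let prompt = format!("File '{}' exists. Overwrite?", path);
//     // `read_prompt_bool` returns None when not interactive; default to false.
//     Ok(ui.read_prompt_bool(0, &prompt, false)?.unwrap_or(false))
// }
// ```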
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts the input stream from this struct, returning it to the caller.
// After the returned function is called, this struct loses its input stream and, with it, the
// ability to prompt the user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
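// For illustration only (hypothetical usage, an editorial addition, not
// part of the original source): the extractor hands the input stream to
// exactly one caller; `name`, `input`, and `output` are assumed values.
//
// ```rust
// let mut ui = BasicUI::new(name, input, true, output, true);
// let make_reader = ui.input_stream_extractor();
// // `ui` can still prompt here; once the factory runs, it cannot:
// let reader = make_reader()?;
// assert!(!ui.can_read());
// ```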
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin one byte at a time and assemble the bytes into UTF-8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI |
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.append_printed_lines(PrintType::Log { verbosity }, message)
}
fn print_error(&self, err: &Error) -> Result<(), Error> {
self.append_printed_lines(PrintType::Error, &format!("{}", err))
}
fn println_interactive(&self, message: &str) -> Result<(), Error> {
self.append_printed_lines(PrintType::Interactive, message)
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
self.append_printed_lines(PrintType::Progress { verbosity, finish }, message)
}
// Read interface
fn can_read(&self) -> bool {
true
}
fn read_prompt(&self, prompt: &str) -> Result<String, Error> {
let (matcher, reply) = self
.prompt_replies
.borrow_mut()
.pop_front()
.unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt));
if let Some(matcher) = matcher {
assert!(
prompt.contains(&matcher),
"Unexpected prompt in TestUI: '{}', was looking for '{}'",
prompt,
matcher
);
}
reply
}
fn set_stdin_echo(&self, _enable: bool) {}
}
}
| {
TestUI {
..Default::default()
}
} | identifier_body |
ui.rs | use failure::{bail, ensure, format_err, Error, Fallible};
use std::cell::{RefCell, RefMut};
use std::io::Read;
use std::rc::Rc;
use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE};
use crate::util::to_hex_string;
use crate::{Reader, ReaderFactory, Writer};
const ERROR_VERBOSITY: i32 = -1;
const INTERACTIVE_VERBOSITY: i32 = -1;
// User interaction interface.
pub trait UI {
// Initialization
fn set_verbosity(&mut self, verbosity: i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
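// Usage sketch (added; `confirm_overwrite` is a hypothetical caller, not part of
// the original crate), showing how the default helpers above compose:
// fn confirm_overwrite(ui: &dyn UI) -> Fallible<bool> {
//     // read_prompt_bool returns None (treat it as "no") when not interactive.
//     Ok(ui.read_prompt_bool(0, "File exists, overwrite?", false)?.unwrap_or(false))
// }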
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts the input stream from this struct, returning it to the caller.
// After the returned function is called, this struct loses its input stream and with it the
// ability to prompt the user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
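// Intended-use sketch (added note; the pipeline detail is an assumption): the
// factory is handed to the processing code, which may pull the Reader out at
// most once; prompting stops working afterwards because `can_read` sees the
// emptied Option.
// let factory: ReaderFactory = ui.input_stream_extractor();
// let reader = factory()?; // a second call would panic on the `unwrap`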
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read stdin byte by byte and assemble the bytes into UTF-8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
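// Isolated sketch (added for illustration) of the incremental UTF-8 decoding used
// by `read_prompt` above: bytes of a multi-byte character arrive one at a time,
// and `from_utf8` keeps reporting an incomplete tail (error_len() == None) until
// the final byte lands.
#[cfg(test)]
mod utf8_assembly_demo {
    #[test]
    fn assembles_multibyte_chars() {
        let bytes = "é".as_bytes(); // two bytes: 0xC3, 0xA9
        let mut buf = vec![bytes[0]];
        // Incomplete sequence: not a hard error yet, it just needs more bytes.
        assert!(std::str::from_utf8(&buf).unwrap_err().error_len().is_none());
        buf.push(bytes[1]);
        assert_eq!(std::str::from_utf8(&buf).unwrap(), "é");
    }
}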
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
} | }
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.append_printed_lines(PrintType::Log { verbosity }, message)
}
fn print_error(&self, err: &Error) -> Result<(), Error> {
self.append_printed_lines(PrintType::Error, &format!("{}", err))
}
fn println_interactive(&self, message: &str) -> Result<(), Error> {
self.append_printed_lines(PrintType::Interactive, message)
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
self.append_printed_lines(PrintType::Progress { verbosity, finish }, message)
}
// Read interface
fn can_read(&self) -> bool {
true
}
fn read_prompt(&self, prompt: &str) -> Result<String, Error> {
let (matcher, reply) = self
.prompt_replies
.borrow_mut()
.pop_front()
.unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt));
if let Some(matcher) = matcher {
assert!(
prompt.contains(&matcher),
"Unexpected prompt in TestUI: '{}', was looking for '{}'",
prompt,
matcher
);
}
reply
}
fn set_stdin_echo(&self, _enable: bool) {}
}
} | }
printed_lines.extend(line_tuples);
Ok(()) | random_line_split |
ui.rs | use failure::{bail, ensure, format_err, Error, Fallible};
use std::cell::{RefCell, RefMut};
use std::io::Read;
use std::rc::Rc;
use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE};
use crate::util::to_hex_string;
use crate::{Reader, ReaderFactory, Writer};
const ERROR_VERBOSITY: i32 = -1;
const INTERACTIVE_VERBOSITY: i32 = -1;
// User interaction interface.
pub trait UI {
// Initialization
fn set_verbosity(&mut self, verbosity: i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts the input stream from this struct, returning it to the caller.
// After the returned function is called, this struct loses its input stream and with it the
// ability to prompt the user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn | (&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read stdin byte by byte and assemble the bytes into UTF-8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.append_printed_lines(PrintType::Log { verbosity }, message)
}
fn print_error(&self, err: &Error) -> Result<(), Error> {
self.append_printed_lines(PrintType::Error, &format!("{}", err))
}
fn println_interactive(&self, message: &str) -> Result<(), Error> {
self.append_printed_lines(PrintType::Interactive, message)
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
self.append_printed_lines(PrintType::Progress { verbosity, finish }, message)
}
// Read interface
fn can_read(&self) -> bool {
true
}
fn read_prompt(&self, prompt: &str) -> Result<String, Error> {
let (matcher, reply) = self
.prompt_replies
.borrow_mut()
.pop_front()
.unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt));
if let Some(matcher) = matcher {
assert!(
prompt.contains(&matcher),
"Unexpected prompt in TestUI: '{}', was looking for '{}'",
prompt,
matcher
);
}
reply
}
fn set_stdin_echo(&self, _enable: bool) {}
}
}
| can_read | identifier_name |
resnet.rs | use plant::*;
use std::{rc::Rc, time::Instant, env, cmp::Ordering::*};
macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; }
type M = Slice<u8, usize>;
static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [
// resnet18, 34
([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]),
([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]),
([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]),
([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]),
([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]),
([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]),
([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
// resnet50, 101, 152; 5 of these shapes already appeared above
// ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]),
// ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]),
([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]),
([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]),
// ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]),
([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]),
([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]),
([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]),
// ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]),
([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]),
([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]),
([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]),
// ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]),
([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]),
([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]),
];
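// Reading guide (added; an interpretation of the code below): each entry maps a
// conv shape [ic, oc, size, kern, stride, pad] to the twelve tiling factors
// [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] consumed by
// `conv`: the ff* factors split the output-channel loop, xx*/yy* split the two
// spatial loops, and rc0/rx0/ry0 split the reduction loops over input channels
// and the kernel window.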
fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) {
let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
let f = Func::new(&name);
let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
let bias = f.buf("BIAS", F32, In, x![oc,]);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None };
let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new();
let lib_cache = unsafe { &mut LIB_CACHE };
let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)|
k == &[ic, oc, size, kern, stride, pad, add, relu]) {
println!("{} reused", name);
x.clone()
} else {
println!("{} compiling", name);
let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] =
TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1;
let pad_buf = if pad == 0 { a } else {
let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad, because the division by stride in osize may not be exact
let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local);
f.comp("cache_pad", x![ic, pad_size, pad_size],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }))
.tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf);
pad_buf
};
let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0));
if let Some(x) = buf_add { // add-relu
b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2)))
} else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); }
let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
for b in &[b, b_final] {
b.split(0, ff0).split(0, ff1).split(0, ff2)
.split(4, xx0).split(4, xx1).split(4, xx2)
.split(8, yy0).split(8, yy1).split(8, yy2);
}
b.split(12, rc0).split(14, rx0).split(16, ry0);
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i
b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i
b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i
b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel);
if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); }
let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1);
let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local])
.set_loc(Local).set_zero_init(true);
b_local.alloc_at(b, 5);
b.before(b_final, 6);
b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]);
b_final.store(buf_b);
if pad_buf != a { pad_buf.alloc_at_func(); }
f.compile_arg("-mprefer-vector-width=512");
let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap());
lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone()));
lib
};
static mut ID: u32 = 0;
let id = unsafe { (ID, ID += 1).0 };
let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i, add| {
if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
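// Shape sanity sketch (added; not in the original source): the osize formula
// above, applied to the first layer (size=224, kern=7, stride=2, pad=3), gives
// (224 - 7 + 2 * 3) / 2 + 1 = 223 / 2 + 1 = 112, matching the
// maxpool(64, 112, 3, 2, 1) call in `main`.
#[cfg(test)]
mod conv_shape_check {
    #[test]
    fn first_layer_osize() {
        let (size, kern, stride, pad) = (224u32, 7, 2, 3);
        assert_eq!((size - kern + 2 * pad) / stride + 1, 112);
    }
}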
// naive version: runs, but is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
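// Reference sketch (added; `gemv_reference` is illustrative, not part of the
// original): the computation `gemv` builds, written directly over slices:
// b[i] = c[i] + sum over j of w[i * n + j] * a[j].
#[allow(dead_code)]
fn gemv_reference(a: &[f32], w: &[f32], c: &[f32], m: usize, n: usize) -> Vec<f32> {
    (0..m)
        .map(|i| c[i] + (0..n).map(|j| w[i * n + j] * a[j]).sum::<f32>())
        .collect()
}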
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inplanes != planes * expansion;
if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i }));
}), b2)
}
}
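// Note (added): this follows the standard ResNet BasicBlock/Bottleneck wiring;
// the optional 1x1 downsample conv matches channel count and stride so that the
// residual add (the `add` input of the last conv) stays shape-compatible.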
fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) - | pl Fn(M), M) {
let expansion = if bottleneck { 4 } else { 1 };
let mut layers = Vec::with_capacity(blocks as _);
layers.push(block(inplanes, planes, size, stride, bottleneck));
for _ in 1..blocks {
layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck));
}
let b = layers.last().unwrap().1;
(move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b) // `(i, i = *b).0` feeds the current input, then rebinds `i` to this block's output buffer
}
fn main() {
parallel_init(0);
let args = env::args().collect::<Vec<_>>();
assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>");
let repeat = args[2].parse::<u32>().unwrap();
let (blocks, bottleneck) = match args[1].as_str() {
"18" => (&[2, 2, 2, 2], false),
"34" => (&[3, 4, 6, 3], false),
"50" => (&[3, 4, 6, 3], true),
"101" => (&[3, 4, 23, 3], true),
"152" => (&[3, 8, 36, 3], true),
x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x),
};
let expansion = if bottleneck { 4 } else { 1 };
let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",));
let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1);
let (f2, b2) = maxpool(64, 112, 3, 2, 1);
let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck);
let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck);
let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck);
let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck);
let (f7, b7) = avgpool(512 * expansion, 7);
let (f8, b8) = gemv(1000, 512 * expansion);
for _ in 0..4 {
let beg = Instant::now();
for _ in 0..repeat {
f1(*input, None);
f2(b1);
f3(b2);
f4(b3);
f5(b4);
f6(b5);
f7(b6);
f8(b7);
}
println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32);
}
fn softmax(x: &mut [f32]) {
let mut m = f32::NEG_INFINITY;
for x in x.iter() { m = m.max(*x); }
let mut s = 0.0;
for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; } // writes exp(x - m) back in place and accumulates the sum in one pass
for x in x.iter_mut() { *x /= s; }
}
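// Worked check (added): subtracting the max keeps exp() in a safe range without
// changing the result; e.g. softmax([m, m]) = [0.5, 0.5] for any m, since
// exp(0) / (exp(0) + exp(0)) = 0.5.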
let result = b8.transmute::<f32, _>(1000);
softmax(result.flat());
let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>();
result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal });
for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) }
}
| > (im | identifier_name |
resnet.rs | use plant::*;
use std::{rc::Rc, time::Instant, env, cmp::Ordering::*};
macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; }
type M = Slice<u8, usize>;
static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [
// resnet18, 34
([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]),
([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]),
([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]),
([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]),
([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]),
([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]),
([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
// resnet50, 101, 152; 5 of these shapes already appeared above
// ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]),
// ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]),
([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]),
([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]),
// ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]),
([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]),
([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]),
([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]),
// ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]),
([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]),
([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]),
([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]),
// ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]),
([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]),
([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]),
];
fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) {
let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
let f = Func::new(&name);
let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
let bias = f.buf("BIAS", F32, In, x![oc,]);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None };
let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new();
let lib_cache = unsafe { &mut LIB_CACHE };
let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)|
k == &[ic, oc, size, kern, stride, pad, add, relu]) {
println!("{} reused", name);
x.clone()
} else {
println!("{} compiling", name);
let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] =
TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1;
let pad_buf = if pad == 0 { a } else {
let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad, because the division by stride in osize may not be exact
let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local);
f.comp("cache_pad", x![ic, pad_size, pad_size],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }))
.tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf);
pad_buf
};
let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0));
if let Some(x) = buf_add { // add-relu
b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2)))
} else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); }
let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
for b in &[b, b_final] {
b.split(0, ff0).split(0, ff1).split(0, ff2)
.split(4, xx0).split(4, xx1).split(4, xx2)
.split(8, yy0).split(8, yy1).split(8, yy2);
}
b.split(12, rc0).split(14, rx0).split(16, ry0);
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i
b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i
b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i
b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel);
if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); }
let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1);
let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local])
.set_loc(Local).set_zero_init(true);
b_local.alloc_at(b, 5);
b.before(b_final, 6);
b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]);
b_final.store(buf_b);
if pad_buf != a { pad_buf.alloc_at_func(); }
f.compile_arg("-mprefer-vector-width=512");
let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap());
lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone()));
lib
};
static mut ID: u32 = 0;
let id = unsafe { (ID, ID += 1).0 };
let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i, add| {
if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
// naive version: runs, but is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) { | let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inplanes != planes * expansion;
if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i }));
}), b2)
}
}
fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) -> (impl Fn(M), M) {
let expansion = if bottleneck { 4 } else { 1 };
let mut layers = Vec::with_capacity(blocks as _);
layers.push(block(inplanes, planes, size, stride, bottleneck));
for _ in 1..blocks {
layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck));
}
let b = layers.last().unwrap().1;
(move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b)
}
fn main() {
parallel_init(0);
let args = env::args().collect::<Vec<_>>();
assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>");
let repeat = args[2].parse::<u32>().unwrap();
let (blocks, bottleneck) = match args[1].as_str() {
"18" => (&[2, 2, 2, 2], false),
"34" => (&[3, 4, 6, 3], false),
"50" => (&[3, 4, 6, 3], true),
"101" => (&[3, 4, 23, 3], true),
"152" => (&[3, 8, 36, 3], true),
x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x),
};
let expansion = if bottleneck { 4 } else { 1 };
let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",));
let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1);
let (f2, b2) = maxpool(64, 112, 3, 2, 1);
let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck);
let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck);
let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck);
let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck);
let (f7, b7) = avgpool(512 * expansion, 7);
let (f8, b8) = gemv(1000, 512 * expansion);
for _ in 0..4 {
let beg = Instant::now();
for _ in 0..repeat {
f1(*input, None);
f2(b1);
f3(b2);
f4(b3);
f5(b4);
f6(b5);
f7(b6);
f8(b7);
}
println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32);
}
fn softmax(x: &mut [f32]) {
let mut m = f32::NEG_INFINITY;
for x in x.iter() { m = m.max(*x); }
let mut s = 0.0;
for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; }
for x in x.iter_mut() { *x /= s; }
}
let result = b8.transmute::<f32, _>(1000);
softmax(result.flat());
let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>();
result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal });
for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) }
} | let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]); | random_line_split |
resnet.rs | use plant::*;
use std::{rc::Rc, time::Instant, env, cmp::Ordering::*};
macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; }
type M = Slice<u8, usize>;
static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [
// resnet18, 34
([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]),
([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]),
([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]),
([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]),
([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]),
([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]),
([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
// resnet50, 101, 152; 5 of these shapes already appeared above
// ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]),
// ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]),
([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]),
([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]),
// ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]),
([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]),
([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]),
([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]),
// ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]),
([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]),
([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]),
([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]),
// ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]),
([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]),
([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]),
];
fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) {
let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
let f = Func::new(&name);
let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
let bias = f.buf("BIAS", F32, In, x![oc,]);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None };
let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new();
let lib_cache = unsafe { &mut LIB_CACHE };
let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)|
k == &[ic, oc, size, kern, stride, pad, add, relu]) {
println!("{} reused", name);
x.clone()
} else {
println!("{} compiling", name);
let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] =
TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1;
let pad_buf = if pad == 0 { a } else {
let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad, because the division by stride in osize may not be exact
let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local);
f.comp("cache_pad", x![ic, pad_size, pad_size],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }))
.tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf);
pad_buf
};
let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0));
if let Some(x) = buf_add { // add-relu
b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2)))
} else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); }
let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
for b in &[b, b_final] {
b.split(0, ff0).split(0, ff1).split(0, ff2)
.split(4, xx0).split(4, xx1).split(4, xx2)
.split(8, yy0).split(8, yy1).split(8, yy2);
}
b.split(12, rc0).split(14, rx0).split(16, ry0);
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i
b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i
b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i
b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel);
if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); }
let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1);
let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local])
.set_loc(Local).set_zero_init(true);
b_local.alloc_at(b, 5);
b.before(b_final, 6);
b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]);
b_final.store(buf_b);
if pad_buf != a { pad_buf.alloc_at_func(); }
f.compile_arg("-mprefer-vector-width=512");
let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap());
lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone()));
lib
};
static mut ID: u32 = 0;
let id = unsafe { (ID, ID += 1).0 };
let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i, add| {
if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
// naive version: runs, but is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inplanes != planes * expansion;
if bottleneck { | let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i }));
}), b2)
}
}
fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) -> (impl Fn(M), M) {
let expansion = if bottleneck { 4 } else { 1 };
let mut layers = Vec::with_capacity(blocks as _);
layers.push(block(inplanes, planes, size, stride, bottleneck));
for _ in 1..blocks {
layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck));
}
let b = layers.last().unwrap().1;
(move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b)
}
fn main() {
parallel_init(0);
let args = env::args().collect::<Vec<_>>();
assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>");
let repeat = args[2].parse::<u32>().unwrap();
let (blocks, bottleneck) = match args[1].as_str() {
"18" => (&[2, 2, 2, 2], false),
"34" => (&[3, 4, 6, 3], false),
"50" => (&[3, 4, 6, 3], true),
"101" => (&[3, 4, 23, 3], true),
"152" => (&[3, 8, 36, 3], true),
x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x),
};
let expansion = if bottleneck { 4 } else { 1 };
let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",));
let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1);
let (f2, b2) = maxpool(64, 112, 3, 2, 1);
let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck);
let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck);
let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck);
let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck);
let (f7, b7) = avgpool(512 * expansion, 7);
let (f8, b8) = gemv(1000, 512 * expansion);
for _ in 0..4 {
let beg = Instant::now();
for _ in 0..repeat {
f1(*input, None);
f2(b1);
f3(b2);
f4(b3);
f5(b4);
f6(b5);
f7(b6);
f8(b7);
}
println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32);
}
fn softmax(x: &mut [f32]) {
let mut m = f32::NEG_INFINITY;
for x in x.iter() { m = m.max(*x); }
let mut s = 0.0;
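// Tuple trick: write the shifted exponential back into x, then take that value for the running sum.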
for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; }
for x in x.iter_mut() { *x /= s; }
}
let result = b8.transmute::<f32, _>(1000);
softmax(result.flat());
let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>();
result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal });
for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) }
}
| conditional_block |
|
resnet.rs | use plant::*;
use std::{rc::Rc, time::Instant, env, cmp::Ordering::*};
macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; }
type M = Slice<u8, usize>;
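// Each TILE_MAP entry maps a conv shape [ic, oc, size, kern, stride, pad] to twelve
// tiling factors [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0]:
// three split factors each for the output-channel (ff) and spatial (xx/yy) loops,
// plus one split each for the reductions over input channels (rc0) and the kernel
// window (rx0/ry0); see the destructuring and split() calls in conv() below.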
static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [
// resnet18, 34
([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]),
([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]),
([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]),
([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]),
([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]),
([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]),
([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
// resnet50, 101, 152: 5 of these shapes already appeared above
// ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]),
([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]),
// ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]),
([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]),
([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]),
([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]),
// ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]),
([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]),
([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]),
([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]),
([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]),
// ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]),
([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]),
([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]),
([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]),
([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]),
// ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]),
([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]),
([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]),
([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]),
];
fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) {
let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
let f = Func::new(&name);
let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
let bias = f.buf("BIAS", F32, In, x![oc,]);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None };
let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new();
let lib_cache = unsafe { &mut LIB_CACHE };
let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)|
k == &[ic, oc, size, kern, stride, pad, add, relu]) {
println!("{} reused", name);
x.clone()
} else {
println!("{} compiling", name);
let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] =
TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1;
let pad_buf = if pad == 0 { a } else {
let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad, since the division by stride in osize rounds down (e.g. size 224, kern 7, stride 2, pad 3: osize 112, pad_size 229 <= 230)
let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local);
f.comp("cache_pad", x![ic, pad_size, pad_size],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }))
.tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf);
pad_buf
};
let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0));
if let Some(x) = buf_add { // add-relu
b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2)))
} else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); }
let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
for b in &[b, b_final] {
b.split(0, ff0).split(0, ff1).split(0, ff2)
.split(4, xx0).split(4, xx1).split(4, xx2)
.split(8, yy0).split(8, yy1).split(8, yy2);
}
b.split(12, rc0).split(14, rx0).split(16, ry0);
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i
b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i
// ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i
b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i
b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel);
if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); }
let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1);
let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local])
.set_loc(Local).set_zero_init(true);
b_local.alloc_at(b, 5);
b.before(b_final, 6);
b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]);
b_final.store(buf_b);
if pad_buf != a { pad_buf.alloc_at_func(); }
f.compile_arg("-mprefer-vector-width=512");
let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap());
lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone()));
lib
};
static mut ID: u32 = 0;
let id = unsafe { (ID, ID += 1).0 };
let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i, add| {
if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
// naive version: it runs, but it is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1; // e.g. maxpool(64, 112, 3, 2, 1): (112 - 3 + 2) / 2 + 1 = 56
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
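// gemv computes b = W * a + c (matrix-vector product plus bias); rows of W are
// processed in parallel via tag(0, Parallel).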
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inpl | }), b2)
}
}
fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bo
ol) -> (impl Fn(M), M) {
let expansion = if bottleneck { 4 } else { 1 };
let mut layers = Vec::with_capacity(blocks as _);
layers.push(block(inplanes, planes, size, stride, bottleneck));
for _ in 1..blocks {
layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck));
}
let b = layers.last().unwrap().1;
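// Tuple trick: element 0 hands the current input to f, then element 1 rebinds i
// to this block's output buffer for the next iteration (left-to-right evaluation).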
(move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b)
}
fn main() {
parallel_init(0);
let args = env::args().collect::<Vec<_>>();
assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>");
let repeat = args[2].parse::<u32>().unwrap();
let (blocks, bottleneck) = match args[1].as_str() {
"18" => (&[2, 2, 2, 2], false),
"34" => (&[3, 4, 6, 3], false),
"50" => (&[3, 4, 6, 3], true),
"101" => (&[3, 4, 23, 3], true),
"152" => (&[3, 8, 36, 3], true),
x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x),
};
let expansion = if bottleneck { 4 } else { 1 };
let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",));
let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1);
let (f2, b2) = maxpool(64, 112, 3, 2, 1);
let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck);
let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck);
let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck);
let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck);
let (f7, b7) = avgpool(512 * expansion, 7);
let (f8, b8) = gemv(1000, 512 * expansion);
for _ in 0..4 {
let beg = Instant::now();
for _ in 0..repeat {
f1(*input, None);
f2(b1);
f3(b2);
f4(b3);
f5(b4);
f6(b5);
f7(b6);
f8(b7);
}
println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32);
}
fn softmax(x: &mut [f32]) {
let mut m = f32::NEG_INFINITY;
for x in x.iter() { m = m.max(*x); }
let mut s = 0.0;
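// Tuple trick: write the shifted exponential back into x, then take that value for the running sum.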
for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; }
for x in x.iter_mut() { *x /= s; }
}
let result = b8.transmute::<f32, _>(1000);
softmax(result.flat());
let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>();
result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal });
for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) }
}
| anes != planes * expansion;
if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); | identifier_body |
terminal.rs | use crate::os_glue::Glue;
use crate::{Features, Key, TermOut};
use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX};
use std::error::Error;
use std::mem;
use std::panic::PanicInfo;
use std::sync::Arc;
use std::time::Duration;
/// Actor that manages the connection to the terminal
pub struct Terminal {
resize: Fwd<Option<Share<TermOut>>>,
input: Fwd<Key>, | termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume after the subprocess has finished with the `resume`
/// call.
pub fn pause(&mut self, cx: CX![]) {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
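// timer_max! only ever moves the expiry later, so a burst of keys produces a single Check 300ms after the last one.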
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string, restores cooked mode and then does the default
// panic action (e.g. dump out backtrace). This should be called
// every time we switch to/from raw mode, and every time the
// cleanup string is changed.
fn update_panic_hook(&mut self) {
// Discard old hook
let _ = std::panic::take_hook();
let defhook = self.panic_hook.clone();
if self.paused {
std::panic::set_hook(Box::new(move |info| defhook(info)));
} else {
let cleanup_fn = self.glue.cleanup_fn();
let cleanup = self.cleanup.clone();
std::panic::set_hook(Box::new(move |info| {
cleanup_fn(&cleanup[..]);
defhook(info);
}));
}
}
}
impl Drop for Terminal {
fn drop(&mut self) {
// Drop panic hook and clean up terminal
let _ = std::panic::take_hook();
if !self.paused {
self.glue.cleanup_fn()(&self.cleanup[..]);
}
}
} | random_line_split |
|
terminal.rs | use crate::os_glue::Glue;
use crate::{Features, Key, TermOut};
use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX};
use std::error::Error;
use std::mem;
use std::panic::PanicInfo;
use std::sync::Arc;
use std::time::Duration;
/// Actor that manages the connection to the terminal
pub struct Terminal {
resize: Fwd<Option<Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume after the subprocess has finished with the `resume`
/// call.
pub fn pause(&mut self, cx: CX![]) |
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
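// timer_max! only ever moves the expiry later, so a burst of keys produces a single Check 300ms after the last one.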
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string, restores cooked mode and then does the default
// panic action (e.g. dump out backtrace). This should be called
// every time we switch to/from raw mode, and every time the
// cleanup string is changed.
fn update_panic_hook(&mut self) {
// Discard old hook
let _ = std::panic::take_hook();
let defhook = self.panic_hook.clone();
if self.paused {
std::panic::set_hook(Box::new(move |info| defhook(info)));
} else {
let cleanup_fn = self.glue.cleanup_fn();
let cleanup = self.cleanup.clone();
std::panic::set_hook(Box::new(move |info| {
cleanup_fn(&cleanup[..]);
defhook(info);
}));
}
}
}
impl Drop for Terminal {
fn drop(&mut self) {
// Drop panic hook and clean up terminal
let _ = std::panic::take_hook();
if !self.paused {
self.glue.cleanup_fn()(&self.cleanup[..]);
}
}
}
| {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
} | identifier_body |
terminal.rs | use crate::os_glue::Glue;
use crate::{Features, Key, TermOut};
use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX};
use std::error::Error;
use std::mem;
use std::panic::PanicInfo;
use std::sync::Arc;
use std::time::Duration;
/// Actor that manages the connection to the terminal
pub struct Terminal {
resize: Fwd<Option<Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume after the subprocess has finished with the `resume`
/// call.
pub fn | (&mut self, cx: CX![]) {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
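// timer_max! only ever moves the expiry later, so a burst of keys produces a single Check 300ms after the last one.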
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string, restores cooked mode and then does the default
// panic action (e.g. dump out backtrace). This should be called
// every time we switch to/from raw mode, and every time the
// cleanup string is changed.
fn update_panic_hook(&mut self) {
// Discard old hook
let _ = std::panic::take_hook();
let defhook = self.panic_hook.clone();
if self.paused {
std::panic::set_hook(Box::new(move |info| defhook(info)));
} else {
let cleanup_fn = self.glue.cleanup_fn();
let cleanup = self.cleanup.clone();
std::panic::set_hook(Box::new(move |info| {
cleanup_fn(&cleanup[..]);
defhook(info);
}));
}
}
}
impl Drop for Terminal {
fn drop(&mut self) {
// Drop panic hook and clean up terminal
let _ = std::panic::take_hook();
if !self.paused {
self.glue.cleanup_fn()(&self.cleanup[..]);
}
}
}
| pause | identifier_name |
terminal.rs | use crate::os_glue::Glue;
use crate::{Features, Key, TermOut};
use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX};
use std::error::Error;
use std::mem;
use std::panic::PanicInfo;
use std::sync::Arc;
use std::time::Duration;
/// Actor that manages the connection to the terminal
pub struct Terminal {
resize: Fwd<Option<Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume after the subprocess has finished with the `resume`
/// call.
pub fn pause(&mut self, cx: CX![]) {
if !self.paused |
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
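// timer_max! only ever moves the expiry later, so a burst of keys produces a single Check 300ms after the last one.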
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string, restores cooked mode and then does the default
// panic action (e.g. dump out backtrace). This should be called
// every time we switch to/from raw mode, and every time the
// cleanup string is changed.
fn update_panic_hook(&mut self) {
// Discard old hook
let _ = std::panic::take_hook();
let defhook = self.panic_hook.clone();
if self.paused {
std::panic::set_hook(Box::new(move |info| defhook(info)));
} else {
let cleanup_fn = self.glue.cleanup_fn();
let cleanup = self.cleanup.clone();
std::panic::set_hook(Box::new(move |info| {
cleanup_fn(&cleanup[..]);
defhook(info);
}));
}
}
}
impl Drop for Terminal {
fn drop(&mut self) {
// Drop panic hook and clean up terminal
let _ = std::panic::take_hook();
if !self.paused {
self.glue.cleanup_fn()(&self.cleanup[..]);
}
}
}
| {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
} | conditional_block |
mod.rs |
fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self {
Self {
k: *key,
t: [counter_low(chunk_counter), counter_high(chunk_counter)],
d: flags.into(),
}
}
fn plus_chunks(&self, chunks: u64) -> Self {
let t = self.chunk_counter() + chunks;
Self {
k: self.k,
t: [counter_low(t), counter_high(t)],
d: self.d,
}
}
#[inline]
fn key(&self) -> &CVWords {
&self.k
}
#[inline]
fn chunk_counter(&self) -> u64 {
self.t[0] as u64 | (self.t[1] as u64) << 32
}
#[inline]
fn flags(&self) -> u8 {
self.d as u8
}
/// Returns the bytes to be copied to the control uniform in the GPU.
///
/// The contents of the returned slice are opaque and should be interpreted
/// only by the shader.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
// According to the specification, the host and the device must have
// the same endianness, so no endian conversion is necessary even on
// big-endian hosts.
debug_assert_eq!(
mem::size_of_val(self),
shaders::blake3::CONTROL_UNIFORM_SIZE,
"must not have padding"
);
unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) }
}
}
// Variant of compress_subtree_wide which takes parents as input.
fn compress_parents_wide<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
out: &mut [u8],
) -> usize {
debug_assert!(input.len().is_power_of_two());
// Note that the single block case does *not* bump the SIMD degree up to 2
// when it is 1. This allows Rayon the option of multi-threading even the
// 2-block case, which can help performance on smaller platforms.
if input.len() <= platform.simd_degree() * BLOCK_LEN {
return compress_parents_parallel(input, key, flags, platform, out);
}
// With more than simd_degree blocks, we need to recurse. Start by dividing
// the input into left and right subtrees. (Note that this is only optimal
// as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
// of 3 or something, we'll need a more complicated strategy.)
debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
let (left, right) = input.split_at(input.len() / 2);
// Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
// account for the special case of returning 2 outputs when the SIMD degree
// is 1.
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let degree = if left.len() == BLOCK_LEN {
// The "simd_degree=1 and we're at the leaf nodes" case.
debug_assert_eq!(platform.simd_degree(), 1);
1
} else {
cmp::max(platform.simd_degree(), 2)
};
let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
// Recurse! This uses multiple threads if the "rayon" feature is enabled.
let (left_n, right_n) = J::join(
|| compress_parents_wide::<J>(left, key, flags, platform, left_out),
|| compress_parents_wide::<J>(right, key, flags, platform, right_out),
left.len(),
right.len(),
);
// The special case again. If simd_degree=1, then we'll have left_n=1 and
// right_n=1. Rather than compressing them into a single output, return
// them directly, to make sure we always have at least two outputs.
debug_assert_eq!(left_n, degree);
debug_assert!(right_n >= 1 && right_n <= left_n);
if left_n == 1 {
out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
return 2;
}
// Otherwise, do one layer of parent node compression.
let num_children = left_n + right_n;
compress_parents_parallel(
&cv_array[..num_children * OUT_LEN],
key,
flags,
platform,
out,
)
}
// Variant of compress_subtree_to_parent_node which takes parents as input.
fn compress_parents_to_parent_node<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
) -> [u8; BLOCK_LEN] {
debug_assert!(input.len() > BLOCK_LEN);
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array);
debug_assert!(num_cvs >= 2);
// If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
// compress_parents_wide() returns more than 2 chaining values. Condense
// them into 2 by forming parent nodes repeatedly.
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
while num_cvs > 2 {
let cv_slice = &cv_array[..num_cvs * OUT_LEN];
num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
}
*array_ref!(cv_array, 0, 2 * OUT_LEN)
}
/// GPU-accelerated Hasher.
///
/// This is a wrapper around a [`Hasher`] which also allows exporting the key
/// and flags to be used by a GPU shader, and importing the shader's result.
///
/// This wrapper should be used with care, since incorrect use can lead to a
/// wrong hash output. It also allows extracting the key from the state, which
/// would otherwise not be allowed in safe code.
///
/// This wrapper can be freely converted to its inner [`Hasher`], through the
/// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`]
/// wherever the extra functionality from this wrapper is not needed.
///
/// [`Hasher`]:../struct.Hasher.html
#[derive(Clone, Debug, Default)]
pub struct GpuHasher {
inner: Hasher,
}
impl GpuHasher {
/// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new).
#[inline]
pub fn new() -> Self {
Self {
inner: Hasher::new(),
}
}
/// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed).
#[inline]
pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
Self {
inner: Hasher::new_keyed(key),
}
}
/// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the | ///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u64;
assert_eq!(chunk_count % parent_count, 0, "invalid child count");
// The lazy merge of the CV stack needs at least 2 inputs.
// And compress_parents_to_parent_node needs at least 2 blocks.
assert!(parent_count > 2, "invalid parent count");
// The shader inputs and outputs are 32-bit words, which are in native byte order.
// The chunk shader byte swaps its input, but neither shader byte swaps its output.
// Since the rest of the code assumes little endian, byte swap the buffer here.
Self::swap_endian::<J>(parents);
let cv_pair = compress_parents_to_parent_node::<J>(
parents,
&self.key,
self.chunk_state.flags,
self.chunk_state.platform,
);
let left_cv = array_ref!(cv_pair, 0, 32);
let right_cv = array_ref!(cv_pair, 32, 32);
// Push the two CVs we received into the CV stack in order. Because
// the stack merges lazily, this guarantees we aren't merging the
// root.
self.push_cv(left_cv, chunk_counter);
self.push_cv(right_cv, chunk_counter + (chunk_count / 2));
self.chunk_state.chunk_counter += chunk_count;
self
}
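// A minimal sketch of the intended call sequence on a little-endian host, using
// the CPU simulation below in place of a real GPU dispatch (`input` and the
// `J: Join` type are assumed from the caller's context):
//
//     let mut hasher = GpuHasher::new();
//     let control = hasher.gpu_control(0);
//     let mut parents = vec![0u8; 128 * OUT_LEN];
//     hasher.simulate_chunk_shader::<J>(128, &input[..128 * CHUNK_LEN], &mut parents, &control);
//     hasher.update_from_gpu::<J>(128, &mut parents);
//     let hash = hasher.finalize();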
// CPU simulation of the BLAKE3 chunk shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_chunk_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * CHUNK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_chunk_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
for chunk in input.chunks_exact(CHUNK_LEN) {
chunks.push(array_ref!(chunk, 0, CHUNK_LEN));
}
self.chunk_state.platform.hash_many(
&chunks,
control.key(),
control.chunk_counter(),
IncrementCounter::Yes,
control.flags(),
CHUNK_START,
CHUNK_END,
output,
);
}
}
// CPU simulation of the BLAKE3 parent shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_parent_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * BLOCK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_parent_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new();
for parent in input.chunks_exact(BLOCK_LEN) {
parents.push(array_ref!(parent, 0, BLOCK_LEN));
}
self.chunk_state.platform.hash_many(
&parents,
control.key(),
0,
IncrementCounter::No,
control.flags() | PARENT,
0,
0,
output,
);
}
}
#[doc(hidden)]
#[cfg(target_endian = "big")]
pub fn swap_endian<J: Join>(buffer: &mut [u8]) {
debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size");
debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size");
if buffer.len() > OUT_LEN {
let (left, right) = buffer.split_at_mut(buffer.len() / 2);
let left_len = left.len();
let right_len = right.len();
J::join(
|| Self::swap_endian::<J>(left),
|| Self::swap_endian::<J>(right),
left_len,
right_len,
);
} else {
for buf in buffer.chunks_exact_mut(4) {
buf.swap(0, 3);
buf.swap(1, 2);
}
}
}
#[doc(hidden)]
#[inline(always)]
#[cfg(target_endian = "little")]
pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {}
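// Worked example (added commentary, not part of the original source):
// swap_endian reverses each aligned 4-byte word in place, e.g.
//
//     [0x01, 0x02, 0x03, 0x04] -> [0x04, 0x03, 0x02, 0x01]
//
// so a 32-byte CV is treated as eight independent u32 words, matching the
// 32-bit word layout of the shader buffers.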
}
impl Deref for GpuHasher {
type Target = Hasher;
#[inline]
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for GpuHasher {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<GpuHasher> for Hasher {
#[inline]
fn from(hasher: GpuHasher) -> Hasher {
hasher.inner
}
}
/// SPIR-V shader modules.
pub mod shaders {
/// Shader module for one level of the BLAKE3 tree.
pub mod blake3 {
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "big")]
pub fn chunk_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-chunk-be.spv")
}
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "little")]
pub fn chunk_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-chunk-le.spv")
}
/// Returns the SPIR-V code for the parent shader module.
pub fn parent_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-parent.spv")
}
/// The local workgroup size.
pub const WORKGROUP_SIZE: usize = 128;
/// The descriptor binding for the input buffer.
pub const INPUT_BUFFER_BINDING: u32 = 0;
/// The descriptor binding for the output buffer.
pub const OUTPUT_BUFFER_BINDING: u32 = 1;
/// The size of the control uniform.
pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4;
}
}
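// Dispatch note (added commentary, not part of the original source): with the
// constants above, hashing `n` chunks would take `n / WORKGROUP_SIZE`
// workgroups, with the input buffer at binding 0, the output buffer at
// binding 1, and the 44-byte (CONTROL_UNIFORM_SIZE) control uniform filled
// from `GpuControl::as_bytes`. The descriptor-set and pipeline plumbing
// depend on the GPU API in use and are not shown here.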
#[cfg(test)]
#[cfg(feature = "std")]
mod tests {
use super::*;
fn selftest_seq(len: usize) -> Vec<u8> {
let seed = len as u32;
let mut out = Vec::with_capacity(len);
let mut a = seed.wrapping_mul(0xDEAD4BAD);
let mut b = 1;
for _ in 0..len {
let t = a.wrapping_add(b);
a = b;
b = t;
out.push((t >> 24) as u8);
}
out
}
#[cfg(not(feature = "rayon"))]
type Join = join::SerialJoin;
#[cfg(feature = "rayon")]
type Join = join::RayonJoin;
#[test]
fn simulate_shader_one_level_once() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_one_level_twice() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
128,
&input[..len],
&mut buffer,
&hasher.gpu_control(0),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
hasher.simulate_chunk_shader::<Join>(
128,
&input[len..],
&mut buffer,
&hasher.gpu_control(128),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_once() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0));
hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer2);
hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_twice() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
2 * 128,
| /// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place. | random_line_split |
mod.rs | fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self {
Self {
k: *key,
t: [counter_low(chunk_counter), counter_high(chunk_counter)],
d: flags.into(),
}
}
fn plus_chunks(&self, chunks: u64) -> Self {
let t = self.chunk_counter() + chunks;
Self {
k: self.k,
t: [counter_low(t), counter_high(t)],
d: self.d,
}
}
#[inline]
fn key(&self) -> &CVWords {
&self.k
}
#[inline]
fn chunk_counter(&self) -> u64 {
self.t[0] as u64 | (self.t[1] as u64) << 32
}
#[inline]
fn flags(&self) -> u8 {
self.d as u8
}
/// Returns the bytes to be copied to the control uniform in the GPU.
///
/// The contents of the returned slice are opaque and should be interpreted
/// only by the shader.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
// According to the specification, the host and the device must have
// the same endianness, so no endian conversion is necessary even on
// big-endian hosts.
debug_assert_eq!(
mem::size_of_val(self),
shaders::blake3::CONTROL_UNIFORM_SIZE,
"must not have padding"
);
unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) }
}
}
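// Layout note (an inference from the fields used above, not original
// documentation): assuming `CVWords` is the usual eight 32-bit words, the
// control uniform packs 11 native-byte-order u32 words: words 0..8 hold the
// key (`k`), words 8..10 the chunk counter split into low/high halves (`t`),
// and word 10 the flags (`d`), i.e. 11 * 4 bytes, which matches
// `shaders::blake3::CONTROL_UNIFORM_SIZE`.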
// Variant of compress_subtree_wide which takes parents as input.
fn compress_parents_wide<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
out: &mut [u8],
) -> usize {
debug_assert!(input.len().is_power_of_two());
// Note that the single block case does *not* bump the SIMD degree up to 2
// when it is 1. This allows Rayon the option of multi-threading even the
// 2-block case, which can help performance on smaller platforms.
if input.len() <= platform.simd_degree() * BLOCK_LEN {
return compress_parents_parallel(input, key, flags, platform, out);
}
// With more than simd_degree blocks, we need to recurse. Start by dividing
// the input into left and right subtrees. (Note that this is only optimal
// as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
// of 3 or something, we'll need a more complicated strategy.)
debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
let (left, right) = input.split_at(input.len() / 2);
// Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
// account for the special case of returning 2 outputs when the SIMD degree
// is 1.
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let degree = if left.len() == BLOCK_LEN {
// The "simd_degree=1 and we're at the leaf nodes" case.
debug_assert_eq!(platform.simd_degree(), 1);
1
} else {
cmp::max(platform.simd_degree(), 2)
};
let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
// Recurse! This uses multiple threads if the "rayon" feature is enabled.
let (left_n, right_n) = J::join(
|| compress_parents_wide::<J>(left, key, flags, platform, left_out),
|| compress_parents_wide::<J>(right, key, flags, platform, right_out),
left.len(),
right.len(),
);
// The special case again. If simd_degree=1, then we'll have left_n=1 and
// right_n=1. Rather than compressing them into a single output, return
// them directly, to make sure we always have at least two outputs.
debug_assert_eq!(left_n, degree);
debug_assert!(right_n >= 1 && right_n <= left_n);
if left_n == 1 {
out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
return 2;
}
// Otherwise, do one layer of parent node compression.
let num_children = left_n + right_n;
compress_parents_parallel(
&cv_array[..num_children * OUT_LEN],
key,
flags,
platform,
out,
)
}
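// Worked example (added commentary, not part of the original source): with
// simd_degree = 4 and 16 parent blocks of input, the recursion splits into
// halves of 8 blocks, then leaves of 4 blocks that are hashed in a single
// SIMD batch by compress_parents_parallel; each level of the recursion then
// compresses its children's CVs again, roughly halving the data per level.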
// Variant of compress_subtree_to_parent_node which takes parents as input.
fn compress_parents_to_parent_node<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
) -> [u8; BLOCK_LEN] {
debug_assert!(input.len() > BLOCK_LEN);
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array);
debug_assert!(num_cvs >= 2);
// If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
// compress_parents_wide() returns more than 2 chaining values. Condense
// them into 2 by forming parent nodes repeatedly.
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
while num_cvs > 2 {
let cv_slice = &cv_array[..num_cvs * OUT_LEN];
num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
}
*array_ref!(cv_array, 0, 2 * OUT_LEN)
}
/// GPU-accelerated Hasher.
///
/// This is a wrapper around a [`Hasher`] which also allows exporting the key
/// and flags to be used by a GPU shader, and importing the shader's result.
///
/// This wrapper should be used with care, since incorrect use can lead to a
/// wrong hash output. It also allows extracting the key from the state, which
/// would otherwise not be allowed in safe code.
///
/// This wrapper can be freely converted to its inner [`Hasher`], through the
/// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`]
/// wherever the extra functionality from this wrapper is not needed.
///
/// [`Hasher`]: ../struct.Hasher.html
#[derive(Clone, Debug, Default)]
pub struct GpuHasher {
inner: Hasher,
}
impl GpuHasher {
/// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new).
#[inline]
pub fn new() -> Self {
Self {
inner: Hasher::new(),
}
}
/// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed).
#[inline]
pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
Self {
inner: Hasher::new_keyed(key),
}
}
/// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the
/// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place.
///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u64;
assert_eq!(chunk_count % parent_count, 0, "invalid child count");
// The lazy merge of the CV stack needs at least 2 inputs.
// And compress_parents_to_parent_node needs at least 2 blocks.
assert!(parent_count > 2, "invalid parent count");
// The shader inputs and outputs are 32-bit words, which are in native byte order.
// The chunk shader byte swaps its input, but neither shader byte swaps its output.
// Since the rest of the code assumes little endian, byte swap the buffer here.
Self::swap_endian::<J>(parents);
let cv_pair = compress_parents_to_parent_node::<J>(
parents,
&self.key,
self.chunk_state.flags,
self.chunk_state.platform,
);
let left_cv = array_ref!(cv_pair, 0, 32);
let right_cv = array_ref!(cv_pair, 32, 32);
// Push the two CVs we received into the CV stack in order. Because
// the stack merges lazily, this guarantees we aren't merging the
// root.
self.push_cv(left_cv, chunk_counter);
self.push_cv(right_cv, chunk_counter + (chunk_count / 2));
self.chunk_state.chunk_counter += chunk_count;
self
}
// CPU simulation of the BLAKE3 chunk shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_chunk_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * CHUNK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_chunk_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
for chunk in input.chunks_exact(CHUNK_LEN) {
chunks.push(array_ref!(chunk, 0, CHUNK_LEN));
}
self.chunk_state.platform.hash_many(
&chunks,
control.key(),
control.chunk_counter(),
IncrementCounter::Yes,
control.flags(),
CHUNK_START,
CHUNK_END,
output,
);
}
}
// CPU simulation of the BLAKE3 parent shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_parent_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * BLOCK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_parent_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new();
for parent in input.chunks_exact(BLOCK_LEN) {
parents.push(array_ref!(parent, 0, BLOCK_LEN));
}
self.chunk_state.platform.hash_many(
&parents,
control.key(),
0,
IncrementCounter::No,
control.flags() | PARENT,
0,
0,
output,
);
}
}
#[doc(hidden)]
#[cfg(target_endian = "big")]
pub fn swap_endian<J: Join>(buffer: &mut [u8]) {
debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size");
debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size");
if buffer.len() > OUT_LEN {
let (left, right) = buffer.split_at_mut(buffer.len() / 2);
let left_len = left.len();
let right_len = right.len();
J::join(
|| Self::swap_endian::<J>(left),
|| Self::swap_endian::<J>(right),
left_len,
right_len,
);
} else {
for buf in buffer.chunks_exact_mut(4) {
buf.swap(0, 3);
buf.swap(1, 2);
}
}
}
#[doc(hidden)]
#[inline(always)]
#[cfg(target_endian = "little")]
pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {}
}
impl Deref for GpuHasher {
type Target = Hasher;
#[inline]
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for GpuHasher {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<GpuHasher> for Hasher {
#[inline]
fn from(hasher: GpuHasher) -> Hasher {
hasher.inner
}
}
/// SPIR-V shader modules.
pub mod shaders {
/// Shader module for one level of the BLAKE3 tree.
pub mod blake3 {
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "big")]
pub fn chunk_shader() -> &'static [u8] |
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "little")]
pub fn chunk_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-chunk-le.spv")
}
/// Returns the SPIR-V code for the parent shader module.
pub fn parent_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-parent.spv")
}
/// The local workgroup size.
pub const WORKGROUP_SIZE: usize = 128;
/// The descriptor binding for the input buffer.
pub const INPUT_BUFFER_BINDING: u32 = 0;
/// The descriptor binding for the output buffer.
pub const OUTPUT_BUFFER_BINDING: u32 = 1;
/// The size of the control uniform.
pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4;
}
}
#[cfg(test)]
#[cfg(feature = "std")]
mod tests {
use super::*;
fn selftest_seq(len: usize) -> Vec<u8> {
let seed = len as u32;
let mut out = Vec::with_capacity(len);
let mut a = seed.wrapping_mul(0xDEAD4BAD);
let mut b = 1;
for _ in 0..len {
let t = a.wrapping_add(b);
a = b;
b = t;
out.push((t >> 24) as u8);
}
out
}
#[cfg(not(feature = "rayon"))]
type Join = join::SerialJoin;
#[cfg(feature = "rayon")]
type Join = join::RayonJoin;
#[test]
fn simulate_shader_one_level_once() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_one_level_twice() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
128,
&input[..len],
&mut buffer,
&hasher.gpu_control(0),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
hasher.simulate_chunk_shader::<Join>(
128,
&input[len..],
&mut buffer,
&hasher.gpu_control(128),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_once() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0));
hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer2);
hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_twice() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
2 * 128,
| {
include_bytes!("shaders/blake3-chunk-be.spv")
} | identifier_body |
mod.rs | fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self {
Self {
k: *key,
t: [counter_low(chunk_counter), counter_high(chunk_counter)],
d: flags.into(),
}
}
fn plus_chunks(&self, chunks: u64) -> Self {
let t = self.chunk_counter() + chunks;
Self {
k: self.k,
t: [counter_low(t), counter_high(t)],
d: self.d,
}
}
#[inline]
fn key(&self) -> &CVWords {
&self.k
}
#[inline]
fn chunk_counter(&self) -> u64 {
self.t[0] as u64 | (self.t[1] as u64) << 32
}
#[inline]
fn | (&self) -> u8 {
self.d as u8
}
/// Returns the bytes to be copied to the control uniform in the GPU.
///
/// The contents of the returned slice are opaque and should be interpreted
/// only by the shader.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
// According to the specification, the host and the device must have
// the same endianness, so no endian conversion is necessary even on
// big-endian hosts.
debug_assert_eq!(
mem::size_of_val(self),
shaders::blake3::CONTROL_UNIFORM_SIZE,
"must not have padding"
);
unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) }
}
}
// Variant of compress_subtree_wide which takes parents as input.
fn compress_parents_wide<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
out: &mut [u8],
) -> usize {
debug_assert!(input.len().is_power_of_two());
// Note that the single block case does *not* bump the SIMD degree up to 2
// when it is 1. This allows Rayon the option of multi-threading even the
// 2-block case, which can help performance on smaller platforms.
if input.len() <= platform.simd_degree() * BLOCK_LEN {
return compress_parents_parallel(input, key, flags, platform, out);
}
// With more than simd_degree blocks, we need to recurse. Start by dividing
// the input into left and right subtrees. (Note that this is only optimal
// as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
// of 3 or something, we'll need a more complicated strategy.)
debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
let (left, right) = input.split_at(input.len() / 2);
// Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
// account for the special case of returning 2 outputs when the SIMD degree
// is 1.
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let degree = if left.len() == BLOCK_LEN {
// The "simd_degree=1 and we're at the leaf nodes" case.
debug_assert_eq!(platform.simd_degree(), 1);
1
} else {
cmp::max(platform.simd_degree(), 2)
};
let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
// Recurse! This uses multiple threads if the "rayon" feature is enabled.
let (left_n, right_n) = J::join(
|| compress_parents_wide::<J>(left, key, flags, platform, left_out),
|| compress_parents_wide::<J>(right, key, flags, platform, right_out),
left.len(),
right.len(),
);
// The special case again. If simd_degree=1, then we'll have left_n=1 and
// right_n=1. Rather than compressing them into a single output, return
// them directly, to make sure we always have at least two outputs.
debug_assert_eq!(left_n, degree);
debug_assert!(right_n >= 1 && right_n <= left_n);
if left_n == 1 {
out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
return 2;
}
// Otherwise, do one layer of parent node compression.
let num_children = left_n + right_n;
compress_parents_parallel(
&cv_array[..num_children * OUT_LEN],
key,
flags,
platform,
out,
)
}
// Variant of compress_subtree_to_parent_node which takes parents as input.
fn compress_parents_to_parent_node<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
) -> [u8; BLOCK_LEN] {
debug_assert!(input.len() > BLOCK_LEN);
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array);
debug_assert!(num_cvs >= 2);
// If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
// compress_parents_wide() returns more than 2 chaining values. Condense
// them into 2 by forming parent nodes repeatedly.
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
while num_cvs > 2 {
let cv_slice = &cv_array[..num_cvs * OUT_LEN];
num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
}
*array_ref!(cv_array, 0, 2 * OUT_LEN)
}
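// Worked example (added commentary, not part of the original source): if
// compress_parents_wide hands back num_cvs = 8, the loop above condenses
// 8 CVs -> 4 CVs -> 2 CVs, and the final pair (2 * OUT_LEN = 64 bytes) is
// returned as one parent-node block for the caller to split into left/right
// CVs and push onto the CV stack.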
/// GPU-accelerated Hasher.
///
/// This is a wrapper around a [`Hasher`] which also allows exporting the key
/// and flags to be used by a GPU shader, and importing the shader's result.
///
/// This wrapper should be used with care, since incorrect use can lead to a
/// wrong hash output. It also allows extracting the key from the state, which
/// would otherwise not be allowed in safe code.
///
/// This wrapper can be freely converted to its inner [`Hasher`], through the
/// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`]
/// wherever the extra functionality from this wrapper is not needed.
///
/// [`Hasher`]: ../struct.Hasher.html
#[derive(Clone, Debug, Default)]
pub struct GpuHasher {
inner: Hasher,
}
impl GpuHasher {
/// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new).
#[inline]
pub fn new() -> Self {
Self {
inner: Hasher::new(),
}
}
/// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed).
#[inline]
pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
Self {
inner: Hasher::new_keyed(key),
}
}
/// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the
/// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place.
///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u64;
assert_eq!(chunk_count % parent_count, 0, "invalid child count");
// The lazy merge of the CV stack needs at least 2 inputs.
// And compress_parents_to_parent_node needs at least 2 blocks.
assert!(parent_count > 2, "invalid parent count");
// The shader inputs and outputs are 32-bit words, which are in native byte order.
// The chunk shader byte swaps its input, but neither shader byte swaps its output.
// Since the rest of the code assumes little endian, byte swap the buffer here.
Self::swap_endian::<J>(parents);
let cv_pair = compress_parents_to_parent_node::<J>(
parents,
&self.key,
self.chunk_state.flags,
self.chunk_state.platform,
);
let left_cv = array_ref!(cv_pair, 0, 32);
let right_cv = array_ref!(cv_pair, 32, 32);
// Push the two CVs we received into the CV stack in order. Because
// the stack merges lazily, this guarantees we aren't merging the
// root.
self.push_cv(left_cv, chunk_counter);
self.push_cv(right_cv, chunk_counter + (chunk_count / 2));
self.chunk_state.chunk_counter += chunk_count;
self
}
// CPU simulation of the BLAKE3 chunk shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_chunk_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * CHUNK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_chunk_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
for chunk in input.chunks_exact(CHUNK_LEN) {
chunks.push(array_ref!(chunk, 0, CHUNK_LEN));
}
self.chunk_state.platform.hash_many(
&chunks,
control.key(),
control.chunk_counter(),
IncrementCounter::Yes,
control.flags(),
CHUNK_START,
CHUNK_END,
output,
);
}
}
// CPU simulation of the BLAKE3 parent shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_parent_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * BLOCK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_parent_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new();
for parent in input.chunks_exact(BLOCK_LEN) {
parents.push(array_ref!(parent, 0, BLOCK_LEN));
}
self.chunk_state.platform.hash_many(
&parents,
control.key(),
0,
IncrementCounter::No,
control.flags() | PARENT,
0,
0,
output,
);
}
}
#[doc(hidden)]
#[cfg(target_endian = "big")]
pub fn swap_endian<J: Join>(buffer: &mut [u8]) {
debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size");
debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size");
if buffer.len() > OUT_LEN {
let (left, right) = buffer.split_at_mut(buffer.len() / 2);
let left_len = left.len();
let right_len = right.len();
J::join(
|| Self::swap_endian::<J>(left),
|| Self::swap_endian::<J>(right),
left_len,
right_len,
);
} else {
for buf in buffer.chunks_exact_mut(4) {
buf.swap(0, 3);
buf.swap(1, 2);
}
}
}
#[doc(hidden)]
#[inline(always)]
#[cfg(target_endian = "little")]
pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {}
}
impl Deref for GpuHasher {
type Target = Hasher;
#[inline]
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for GpuHasher {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<GpuHasher> for Hasher {
#[inline]
fn from(hasher: GpuHasher) -> Hasher {
hasher.inner
}
}
/// SPIR-V shader modules.
pub mod shaders {
/// Shader module for one level of the BLAKE3 tree.
pub mod blake3 {
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "big")]
pub fn chunk_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-chunk-be.spv")
}
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "little")]
pub fn chunk_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-chunk-le.spv")
}
/// Returns the SPIR-V code for the parent shader module.
pub fn parent_shader() -> &'static [u8] {
include_bytes!("shaders/blake3-parent.spv")
}
/// The local workgroup size.
pub const WORKGROUP_SIZE: usize = 128;
/// The descriptor binding for the input buffer.
pub const INPUT_BUFFER_BINDING: u32 = 0;
/// The descriptor binding for the output buffer.
pub const OUTPUT_BUFFER_BINDING: u32 = 1;
/// The size of the control uniform.
pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4;
}
}
#[cfg(test)]
#[cfg(feature = "std")]
mod tests {
use super::*;
fn selftest_seq(len: usize) -> Vec<u8> {
let seed = len as u32;
let mut out = Vec::with_capacity(len);
let mut a = seed.wrapping_mul(0xDEAD4BAD);
let mut b = 1;
for _ in 0..len {
let t = a.wrapping_add(b);
a = b;
b = t;
out.push((t >> 24) as u8);
}
out
}
#[cfg(not(feature = "rayon"))]
type Join = join::SerialJoin;
#[cfg(feature = "rayon")]
type Join = join::RayonJoin;
#[test]
fn simulate_shader_one_level_once() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_one_level_twice() {
let len = CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
128,
&input[..len],
&mut buffer,
&hasher.gpu_control(0),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
hasher.simulate_chunk_shader::<Join>(
128,
&input[len..],
&mut buffer,
&hasher.gpu_control(128),
);
GpuHasher::swap_endian::<Join>(&mut buffer);
hasher.update_from_gpu::<Join>(128, &mut buffer);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_once() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0));
hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0));
GpuHasher::swap_endian::<Join>(&mut buffer2);
hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2);
assert_eq!(hasher.finalize(), expected);
}
#[test]
fn simulate_shader_two_levels_twice() {
let len = 2 * CHUNK_LEN * 128;
let input = selftest_seq(2 * len);
let expected = Hasher::new().update_with_join::<Join>(&input).finalize();
let mut hasher = GpuHasher::new();
let mut buffer1 = vec![0; 2 * OUT_LEN * 128];
let mut buffer2 = vec![0; OUT_LEN * 128];
hasher.simulate_chunk_shader::<Join>(
2 * 128,
| flags | identifier_name |
imp.rs | use super::{
anyhow, env, env_logger, remove_file, DateTime, PathBuf, Receiver, RefCell, Releaser, Result,
Url, Utc, Version, UPDATE_INTERVAL,
};
use crate::Updater;
use std::cell::Cell;
use std::cell::Ref;
use std::cell::RefMut;
use std::path::Path;
use std::sync::mpsc;
pub(super) const LATEST_UPDATE_INFO_CACHE_FN_ASYNC: &str = "last_check_status_async.json";
// Payload that the worker thread will send back
type ReleasePayloadResult = Result<Option<UpdateInfo>>;
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct UpdaterState {
pub(super) last_check: Cell<Option<DateTime<Utc>>>,
current_version: Version,
avail_release: RefCell<Option<UpdateInfo>>,
#[serde(skip, default = "default_interval")]
update_interval: i64,
#[serde(skip)]
worker_state: RefCell<Option<MPSCState>>,
}
impl UpdaterState {
pub(super) fn current_version(&self) -> &Version {
&self.current_version
}
pub(super) fn set_version(&mut self, v: Version) {
self.current_version = v;
}
pub(super) fn latest_avail_version(&self) -> Option<Version> {
self.avail_release
.borrow()
.as_ref()
.map(|ui| ui.version().clone())
}
pub(super) fn borrow_worker(&self) -> Ref<'_, Option<MPSCState>> {
self.worker_state.borrow()
}
pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> {
self.worker_state.borrow_mut()
}
pub(super) fn download_url(&self) -> Option<Url> {
self.avail_release
.borrow()
.as_ref()
.map(|info| info.downloadable_url.clone())
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub(super) struct UpdateInfo {
// Latest version available from github or releaser
pub version: Version,
pub fetched_at: Option<DateTime<Utc>>,
// Link to use to download the above version
pub downloadable_url: Url,
}
impl UpdateInfo {
pub fn new(v: Version, url: Url) -> Self {
UpdateInfo {
version: v,
fetched_at: None,
downloadable_url: url,
}
}
pub(super) fn version(&self) -> &Version {
&self.version
}
pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> {
self.fetched_at.as_ref()
}
pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) {
self.fetched_at = Some(date_time);
}
}
#[derive(Debug)]
pub(super) struct MPSCState {
// First successful call on rx.recv() will cache the results into this field
recvd_payload: RefCell<Option<ReleasePayloadResult>>,
// Receiver end of communication channel with worker thread
rx: RefCell<Option<Receiver<ReleasePayloadResult>>>,
}
impl MPSCState {
pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self {
MPSCState {
recvd_payload: RefCell::new(None),
rx: RefCell::new(Some(rx)),
}
}
}
impl<T> Updater<T>
where
T: Releaser + Send + 'static,
{
pub(super) fn load_or_new(r: T) -> Result<Self> {
let _ = env_logger::try_init();
if let Ok(mut saved_state) = Self::load() {
// Use the version that the workflow reports through its environment variable.
// This version takes priority over what we may have saved last time.
let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok());
if let Some(v) = env_ver {
saved_state.current_version = v;
}
Ok(Updater {
state: saved_state,
releaser: RefCell::new(r),
})
} else {
let current_version = env::workflow_version()
.map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?;
let state = UpdaterState {
current_version,
last_check: Cell::new(None),
avail_release: RefCell::new(None),
worker_state: RefCell::new(None),
update_interval: UPDATE_INTERVAL,
};
let updater = Updater {
state,
releaser: RefCell::new(r),
};
updater.save()?;
Ok(updater)
}
}
pub(super) fn last_check(&self) -> Option<DateTime<Utc>> {
self.state.last_check.get()
}
pub(super) fn set_last_check(&self, t: DateTime<Utc>) {
self.state.last_check.set(Some(t));
}
pub(super) fn update_interval(&self) -> i64 {
self.state.update_interval
}
pub(super) fn set_update_interval(&mut self, t: i64) {
self.state.update_interval = t;
}
fn load() -> Result<UpdaterState> {
let data_file_path = Self::build_data_fn()?;
crate::Data::load_from_file(data_file_path)
.ok_or_else(|| anyhow!("cannot load cached state of updater"))
}
// Save updater's state
pub(super) fn save(&self) -> Result<()> {
let data_file_path = Self::build_data_fn()?;
crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| {
let _r = remove_file(data_file_path);
e
})
}
pub(super) fn start_releaser_worker(
&self,
tx: mpsc::Sender<ReleasePayloadResult>,
p: PathBuf,
) -> Result<()> {
use std::thread;
let releaser = (*self.releaser.borrow()).clone();
thread::Builder::new().spawn(move || {
debug!("other thread: starting in updater thread");
let talk_to_mother = || -> Result<()> {
let (v, url) = releaser.latest_release()?;
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(Utc::now());
let payload = Some(info);
Self::write_last_check_status(&p, &payload)?;
tx.send(Ok(payload))?;
Ok(())
};
let outcome = talk_to_mother();
debug!("other thread: finished checking releaser status");
if let Err(error) = outcome {
tx.send(Err(error))
.expect("could not send error from thread");
}
})?;
Ok(())
}
// write version of latest avail. release (if any) to a cache file
pub(super) fn write_last_check_status(
p: &Path,
updater_info: &Option<UpdateInfo>,
) -> Result<()> {
crate::Data::save_to_file(p, updater_info).map_err(|e| {
let _r = remove_file(p);
e
})
}
// read version of latest avail. release (if any) from a cache file
pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> {
crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path"))
}
pub(super) fn build_data_fn() -> Result<PathBuf> {
let workflow_name = env::workflow_name()
.unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string())
.chars()
.map(|c| if c.is_ascii_alphanumeric() { c } else { '_' })
.collect::<String>();
env::workflow_cache()
.ok_or_else(|| {
anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?")
})
.and_then(|mut data_path| {
env::workflow_uid()
.ok_or_else(|| anyhow!("missing env variable for uid"))
.map(|ref uid| {
let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat();
data_path.push(filename);
data_path
})
})
}
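// Example (added commentary, not part of the original source): with a uid of
// "user.workflow.ABC", a workflow named "My Workflow!" and a cache dir of
// /tmp/cache, the returned path would be
// /tmp/cache/user.workflow.ABC-My_Workflow_-updater.json, since every
// character of the name that is not ASCII-alphanumeric is replaced by '_'.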
pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> {
self.state
.worker_state
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() method first."))
.and_then(|mpsc| {
if mpsc.recvd_payload.borrow().is_none() {
// No payload received yet, try to talk to worker thread
mpsc.rx
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() correctly!"))
.and_then(|rx| {
let rr = if try_flag {
// don't block while trying to receive
rx.try_recv().map_err(|e| anyhow!(e.to_string()))
} else {
// block while waiting to receive
rx.recv().map_err(|e| anyhow!(e.to_string()))
};
rr.and_then(|msg| {
let msg_status = msg.map(|update_info| {
// received good message, update cache for received payload
*self.state.avail_release.borrow_mut() = update_info.clone();
// update last_check if received info is newer than last_check
update_info.as_ref().map(|ui| {
ui.fetched_at().map(|fetched_time| {
if self.last_check().is_none()
|| self.last_check().as_ref().unwrap()
< fetched_time
{
self.set_last_check(*fetched_time);
}
})
});
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info));
});
// save state regardless of content of msg
self.save()?;
msg_status?;
Ok(())
})
})?;
}
Ok(())
})?;
Ok(self
.state
.avail_release
.borrow()
.as_ref()
.map_or(false, |release| *self.current_version() < release.version))
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_async is deprecated. use init()")]
pub(super) fn _update_ready_async(&self) -> Result<bool> {
let worker_state = self.state.worker_state.borrow();
assert!(worker_state.is_some(), "you need to use init first");
let mpsc = worker_state.as_ref().expect("no worker_state");
if mpsc.recvd_payload.borrow().is_none() {
let rx_option = mpsc.rx.borrow();
let rx = rx_option.as_ref().unwrap();
let rr = rx.recv();
if rr.is_ok() {
let msg = rr.as_ref().unwrap();
if msg.is_ok() {
let update_info = msg.as_ref().unwrap();
*self.state.avail_release.borrow_mut() = update_info.clone();
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info.clone()));
} else {
return Err(anyhow!(format!("{:?}", msg.as_ref().unwrap_err())));
}
self.save()?;
} else {
eprintln!("{:?}", rr);
return Err(anyhow!(format!("{:?}", rr)));
} | Ok(true)
} else {
Ok(false)
}
} else {
Ok(false)
}
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_sync is deprecated. use init()")]
pub(super) fn _update_ready_sync(&self) -> Result<bool> {
// A None value for last_check indicates that workflow is being run for first time.
// Thus we update last_check to now and just save the updater state without asking
// Releaser to do a remote call/check for us since we assume that user just downloaded
// the workflow.
const LATEST_UPDATE_INFO_CACHE_FN: &str = "last_check_status.json";
// file for status of last update check
let p = Self::build_data_fn()?.with_file_name(LATEST_UPDATE_INFO_CACHE_FN);
// make a network call to see if a newer version is avail.
// save the result of call to cache file.
let ask_releaser_for_update = || -> Result<bool> {
let (v, url) = self.releaser.borrow().latest_release()?;
let update_avail = *self.current_version() < v;
let now = Utc::now();
let payload = {
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(now);
Some(info)
};
self.set_last_check(now);
Self::write_last_check_status(&p, &payload)?;
*self.state.avail_release.borrow_mut() = payload;
self.save()?;
Ok(update_avail)
};
// if first time checking, just update the updater's timestamp, no network call
if self.last_check().is_none() {
self.set_last_check(Utc::now());
self.save()?;
Ok(false)
} else if self.due_to_check() {
// it's time to talk to remote server
ask_releaser_for_update()
} else {
Self::read_last_check_status(&p)
.map(|last_check_status| {
last_check_status.map_or(false, |last_update_info| {
*self.current_version() < last_update_info.version
})
//.unwrap_or(false)
})
.or(Ok(false))
}
}
}
pub(super) fn default_interval() -> i64 {
UPDATE_INTERVAL
} | }
if let Some(ref updater_info) = *self.state.avail_release.borrow() {
if *self.current_version() < updater_info.version { | random_line_split |
imp.rs | use super::{
anyhow, env, env_logger, remove_file, DateTime, PathBuf, Receiver, RefCell, Releaser, Result,
Url, Utc, Version, UPDATE_INTERVAL,
};
use crate::Updater;
use std::cell::Cell;
use std::cell::Ref;
use std::cell::RefMut;
use std::path::Path;
use std::sync::mpsc;
pub(super) const LATEST_UPDATE_INFO_CACHE_FN_ASYNC: &str = "last_check_status_async.json";
// Payload that the worker thread will send back
type ReleasePayloadResult = Result<Option<UpdateInfo>>;
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct UpdaterState {
pub(super) last_check: Cell<Option<DateTime<Utc>>>,
current_version: Version,
avail_release: RefCell<Option<UpdateInfo>>,
#[serde(skip, default = "default_interval")]
update_interval: i64,
#[serde(skip)]
worker_state: RefCell<Option<MPSCState>>,
}
impl UpdaterState {
pub(super) fn current_version(&self) -> &Version {
&self.current_version
}
pub(super) fn set_version(&mut self, v: Version) {
self.current_version = v;
}
pub(super) fn latest_avail_version(&self) -> Option<Version> {
self.avail_release
.borrow()
.as_ref()
.map(|ui| ui.version().clone())
}
pub(super) fn | (&self) -> Ref<'_, Option<MPSCState>> {
self.worker_state.borrow()
}
pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> {
self.worker_state.borrow_mut()
}
pub(super) fn download_url(&self) -> Option<Url> {
self.avail_release
.borrow()
.as_ref()
.map(|info| info.downloadable_url.clone())
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub(super) struct UpdateInfo {
// Latest version available from github or releaser
pub version: Version,
pub fetched_at: Option<DateTime<Utc>>,
// Link to use to download the above version
pub downloadable_url: Url,
}
impl UpdateInfo {
pub fn new(v: Version, url: Url) -> Self {
UpdateInfo {
version: v,
fetched_at: None,
downloadable_url: url,
}
}
pub(super) fn version(&self) -> &Version {
&self.version
}
pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> {
self.fetched_at.as_ref()
}
pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) {
self.fetched_at = Some(date_time);
}
}
#[derive(Debug)]
pub(super) struct MPSCState {
// First successful call on rx.recv() will cache the results into this field
recvd_payload: RefCell<Option<ReleasePayloadResult>>,
// Receiver end of communication channel with worker thread
rx: RefCell<Option<Receiver<ReleasePayloadResult>>>,
}
impl MPSCState {
pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self {
MPSCState {
recvd_payload: RefCell::new(None),
rx: RefCell::new(Some(rx)),
}
}
}
impl<T> Updater<T>
where
T: Releaser + Send + 'static,
{
pub(super) fn load_or_new(r: T) -> Result<Self> {
let _ = env_logger::try_init();
if let Ok(mut saved_state) = Self::load() {
// Use the version that the workflow reports through its environment variable.
// This version takes priority over what we may have saved last time.
let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok());
if let Some(v) = env_ver {
saved_state.current_version = v;
}
Ok(Updater {
state: saved_state,
releaser: RefCell::new(r),
})
} else {
let current_version = env::workflow_version()
.map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?;
let state = UpdaterState {
current_version,
last_check: Cell::new(None),
avail_release: RefCell::new(None),
worker_state: RefCell::new(None),
update_interval: UPDATE_INTERVAL,
};
let updater = Updater {
state,
releaser: RefCell::new(r),
};
updater.save()?;
Ok(updater)
}
}
pub(super) fn last_check(&self) -> Option<DateTime<Utc>> {
self.state.last_check.get()
}
pub(super) fn set_last_check(&self, t: DateTime<Utc>) {
self.state.last_check.set(Some(t));
}
pub(super) fn update_interval(&self) -> i64 {
self.state.update_interval
}
pub(super) fn set_update_interval(&mut self, t: i64) {
self.state.update_interval = t;
}
fn load() -> Result<UpdaterState> {
let data_file_path = Self::build_data_fn()?;
crate::Data::load_from_file(data_file_path)
.ok_or_else(|| anyhow!("cannot load cached state of updater"))
}
// Save updater's state
pub(super) fn save(&self) -> Result<()> {
let data_file_path = Self::build_data_fn()?;
crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| {
let _r = remove_file(data_file_path);
e
})
}
pub(super) fn start_releaser_worker(
&self,
tx: mpsc::Sender<ReleasePayloadResult>,
p: PathBuf,
) -> Result<()> {
use std::thread;
let releaser = (*self.releaser.borrow()).clone();
thread::Builder::new().spawn(move || {
debug!("other thread: starting in updater thread");
let talk_to_mother = || -> Result<()> {
let (v, url) = releaser.latest_release()?;
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(Utc::now());
let payload = Some(info);
Self::write_last_check_status(&p, &payload)?;
tx.send(Ok(payload))?;
Ok(())
};
let outcome = talk_to_mother();
debug!("other thread: finished checking releaser status");
if let Err(error) = outcome {
tx.send(Err(error))
.expect("could not send error from thread");
}
})?;
Ok(())
}
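// Flow sketch (added commentary, not part of the original source): the worker
// thread owns a clone of the releaser and reports back over the channel:
//
//     releaser.latest_release() -> write_last_check_status(cache file) -> tx.send(Ok(payload))
//
// Errors take the tx.send(Err(..)) path instead. The main thread later drains
// the receiving end inside update_ready_async(), using try_recv() or recv()
// depending on `try_flag`.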
// write version of latest avail. release (if any) to a cache file
pub(super) fn write_last_check_status(
p: &Path,
updater_info: &Option<UpdateInfo>,
) -> Result<()> {
crate::Data::save_to_file(p, updater_info).map_err(|e| {
let _r = remove_file(p);
e
})
}
// read version of latest avail. release (if any) from a cache file
pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> {
crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path"))
}
pub(super) fn build_data_fn() -> Result<PathBuf> {
let workflow_name = env::workflow_name()
.unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string())
.chars()
.map(|c| if c.is_ascii_alphanumeric() { c } else { '_' })
.collect::<String>();
env::workflow_cache()
.ok_or_else(|| {
anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?")
})
.and_then(|mut data_path| {
env::workflow_uid()
.ok_or_else(|| anyhow!("missing env variable for uid"))
.map(|ref uid| {
let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat();
data_path.push(filename);
data_path
})
})
}
pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> {
self.state
.worker_state
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() method first."))
.and_then(|mpsc| {
if mpsc.recvd_payload.borrow().is_none() {
// No payload received yet, try to talk to worker thread
mpsc.rx
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() correctly!"))
.and_then(|rx| {
let rr = if try_flag {
// don't block while trying to receive
rx.try_recv().map_err(|e| anyhow!(e.to_string()))
} else {
// block while waiting to receive
rx.recv().map_err(|e| anyhow!(e.to_string()))
};
rr.and_then(|msg| {
let msg_status = msg.map(|update_info| {
// received good message, update cache for received payload
*self.state.avail_release.borrow_mut() = update_info.clone();
// update last_check if received info is newer than last_check
update_info.as_ref().map(|ui| {
ui.fetched_at().map(|fetched_time| {
if self.last_check().is_none()
|| self.last_check().as_ref().unwrap()
< fetched_time
{
self.set_last_check(*fetched_time);
}
})
});
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info));
});
// save state regardless of content of msg
self.save()?;
msg_status?;
Ok(())
})
})?;
}
Ok(())
})?;
Ok(self
.state
.avail_release
.borrow()
.as_ref()
.map_or(false, |release| *self.current_version() < release.version))
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_async is deprecated. use init()")]
pub(super) fn _update_ready_async(&self) -> Result<bool> {
let worker_state = self.state.worker_state.borrow();
assert!(worker_state.is_some(), "you need to use init first");
let mpsc = worker_state.as_ref().expect("no worker_state");
if mpsc.recvd_payload.borrow().is_none() {
let rx_option = mpsc.rx.borrow();
let rx = rx_option.as_ref().unwrap();
let rr = rx.recv();
if rr.is_ok() {
let msg = rr.as_ref().unwrap();
if msg.is_ok() {
let update_info = msg.as_ref().unwrap();
*self.state.avail_release.borrow_mut() = update_info.clone();
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info.clone()));
} else {
return Err(anyhow!(format!("{:?}", msg.as_ref().unwrap_err())));
}
self.save()?;
} else {
eprintln!("{:?}", rr);
return Err(anyhow!(format!("{:?}", rr)));
}
}
if let Some(ref updater_info) = *self.state.avail_release.borrow() {
if *self.current_version() < updater_info.version {
Ok(true)
} else {
Ok(false)
}
} else {
Ok(false)
}
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_sync is deprecated. use init()")]
pub(super) fn _update_ready_sync(&self) -> Result<bool> {
// A None value for last_check indicates that workflow is being run for first time.
// Thus we update last_check to now and just save the updater state without asking
// Releaser to do a remote call/check for us since we assume that user just downloaded
// the workflow.
const LATEST_UPDATE_INFO_CACHE_FN: &str = "last_check_status.json";
// file for status of last update check
let p = Self::build_data_fn()?.with_file_name(LATEST_UPDATE_INFO_CACHE_FN);
// make a network call to see if a newer version is avail.
// save the result of call to cache file.
let ask_releaser_for_update = || -> Result<bool> {
let (v, url) = self.releaser.borrow().latest_release()?;
let update_avail = *self.current_version() < v;
let now = Utc::now();
let payload = {
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(now);
Some(info)
};
self.set_last_check(now);
Self::write_last_check_status(&p, &payload)?;
*self.state.avail_release.borrow_mut() = payload;
self.save()?;
Ok(update_avail)
};
// if first time checking, just update the updater's timestamp, no network call
if self.last_check().is_none() {
self.set_last_check(Utc::now());
self.save()?;
Ok(false)
} else if self.due_to_check() {
// it's time to talk to remote server
ask_releaser_for_update()
} else {
Self::read_last_check_status(&p)
.map(|last_check_status| {
last_check_status.map_or(false, |last_update_info| {
*self.current_version() < last_update_info.version
})
//.unwrap_or(false)
})
.or(Ok(false))
}
}
}
pub(super) fn default_interval() -> i64 {
UPDATE_INTERVAL
}
| borrow_worker | identifier_name |