| file_name (string, 4-69 chars) | prefix (string, 0-26.7k chars) | suffix (string, 0-24.8k chars) | middle (string, 0-2.12k chars) | fim_type (4 classes: random_line_split, identifier_body, identifier_name, conditional_block) |
|---|---|---|---|---|
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
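// Wire-format sketch (added note, not from the original source): because
// (strings, elements) serializes as a tuple, an index built over
// [{"a"},{"a","b"}] is assumed to appear in JSON as [["a","b"],[[0],[0,1]]],
// which is the shape the deserialization tests below rely on.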
impl Index {
/// given a query expression in DNF form, returns the indices of all matching elements
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// look up all strings and translate them into indices.
// if any string is missing from the strings table, that conjunction can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// none of the conjunctions can possibly match, so there is no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
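// Construction sketch (added illustration): `from_elements` deduplicates the
// string table and `as_elements` reverses the mapping, so a round trip is lossless:
//
//   let elements = vec![btreeset! {"a"}, btreeset! {"a", "b"}];
//   let index = Index::from_elements(&elements);
//   // strings table: {"a", "b"}, elements: [{0}, {0, 1}]
//   assert_eq!(index.as_elements(), elements);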
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except that nested applications of the
/// same operator are flattened.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
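// Representation sketch (added note): `a&c | b&c` is stored as
// {{"a","c"}, {"b","c"}} - the outer set is the disjunction, each inner set
// one conjunction of literals.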
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// Careful: for some expressions this can take exponential time and space. E.g. the
/// disjunctive normal form of `(a | b) & (c | d) & (e | f) & ...` has one conjunction
/// per combination, i.e. 2^n of them for n binary unions.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = Vec::new();
for a in aa.iter() {
if a.is_subset(&b) {
// a has fewer (or equal) literals than b, so a is the weaker term. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a has more literals than b, so a is the stricter term. E.g. x&y | x
// remove a, keep b; several existing terms may be absorbed, so collect them all
to_remove.push(a.clone());
}
}
for r in to_remove {
aa.remove(&r);
}
aa.insert(b);
}
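// Worked example (added): inserting {"a"} into {{"a","b"}, {"a","c"}} removes
// both existing sets, since each of them implies `a`, leaving {{"a"}}; inserting
// {"a","b"} into {{"a"}} returns early because the weaker `a` already covers it.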
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
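// Added regression sketch (not part of the original tests): absorption must drop
// every conjunction that implies the inserted one, not just the last one found.
#[test]
fn test_dnf_simplify_4() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = (a.clone() & b) | (a.clone() & c) | a;
let e = expr.dnf().expression().to_string();
assert_eq!(e, "a");
}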
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
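// Added sketches: the first exercises the as_elements/from_elements round trip,
// the second illustrates the exponential blow-up documented on `Expression::dnf`.
#[test]
fn test_elements_roundtrip() {
let elements = vec![btreeset! {"a"}, btreeset! {"a", "b"}, btreeset! {"b", "c"}];
let index = Index::from_elements(&elements);
assert_eq!(index.as_elements(), elements);
}
#[test]
fn test_dnf_exponential_growth() {
// three binary unions multiply into 2^3 = 8 conjunctions
let expr = (l("a") | l("b")) & (l("c") | l("d")) & (l("e") | l("f"));
assert_eq!(expr.dnf().0.len(), 8);
}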
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compress_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compress_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
xterm.rs | /// Available Xterm colors for use with [`OwoColorize::color`](OwoColorize::color)
/// or [`OwoColorize::on_color`](OwoColorize::on_color)
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum XtermColors {
$(
#[allow(missing_docs)]
$name,
)*
}
impl crate::DynColor for XtermColors {
fn fmt_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[38;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[48;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("38;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("48;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
#[doc(hidden)]
fn get_dyncolors_fg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
#[doc(hidden)]
fn get_dyncolors_bg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
}
impl From<u8> for XtermColors {
fn from(x: u8) -> Self {
match x {
$(
$xterm_num => XtermColors::$name,
)*
}
}
}
impl From<XtermColors> for u8 {
fn from(color: XtermColors) -> Self {
match color {
$(
XtermColors::$name => $xterm_num,
)*
}
}
}
}
$(
#[allow(missing_docs)]
pub struct $name;
impl crate::Color for $name {
const ANSI_FG: &'static str = concat!("\x1b[38;5;", stringify!($xterm_num), "m");
const ANSI_BG: &'static str = concat!("\x1b[48;5;", stringify!($xterm_num), "m");
const RAW_ANSI_BG: &'static str = concat!("48;5;", stringify!($xterm_num));
const RAW_ANSI_FG: &'static str = concat!("38;5;", stringify!($xterm_num));
#[doc(hidden)]
type DynEquivelant = dynamic::XtermColors;
#[doc(hidden)]
const DYN_EQUIVELANT: Self::DynEquivelant = dynamic::XtermColors::$name;
#[doc(hidden)]
fn into_dyncolors() -> crate::DynColors {
crate::DynColors::Xterm(dynamic::XtermColors::$name)
}
}
)*
};
}
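// Expansion sketch (added, abbreviated; derived from the macro arm above):
// a row like `21 Blue (0,0,255)` is assumed to expand to
//   pub struct Blue;
//   impl crate::Color for Blue {
//       const ANSI_FG: &'static str = "\x1b[38;5;21m";
//       const ANSI_BG: &'static str = "\x1b[48;5;21m";
//       ...
//   }
// plus a `XtermColors::Blue` variant handled by the dynamic impls.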
xterm_colors! {
0 UserBlack (0,0,0)
1 UserRed (128,0,0)
2 UserGreen (0,128,0)
3 UserYellow (128,128,0)
4 UserBlue (0,0,128)
5 UserMagenta (128,0,128)
6 UserCyan (0,128,128)
7 UserWhite (192,192,192)
8 UserBrightBlack (128,128,128)
9 UserBrightRed (255,0,0)
10 UserBrightGreen (0,255,0)
11 UserBrightYellow (255,255,0)
12 UserBrightBlue (0,0,255)
13 UserBrightMagenta (255,0,255)
14 UserBrightCyan (0,255,255)
15 UserBrightWhite (255,255,255)
16 Black (0,0,0)
17 StratosBlue (0,0,95)
18 NavyBlue (0,0,135)
19 MidnightBlue (0,0,175)
20 DarkBlue (0,0,215)
21 Blue (0,0,255)
22 CamaroneGreen (0,95,0)
23 BlueStone (0,95,95)
24 OrientBlue (0,95,135)
25 EndeavourBlue (0,95,175)
26 ScienceBlue (0,95,215)
27 BlueRibbon (0,95,255)
28 JapaneseLaurel (0,135,0)
29 DeepSeaGreen (0,135,95)
30 Teal (0,135,135)
31 DeepCerulean (0,135,175)
32 LochmaraBlue (0,135,215)
33 AzureRadiance (0,135,255)
34 LightJapaneseLaurel (0,175,0)
35 Jade (0,175,95)
36 PersianGreen (0,175,135)
37 BondiBlue (0,175,175)
38 Cerulean (0,175,215)
39 LightAzureRadiance (0,175,255)
40 DarkGreen (0,215,0)
41 Malachite (0,215,95)
42 CaribbeanGreen (0,215,135)
43 LightCaribbeanGreen (0,215,175)
44 RobinEggBlue (0,215,215)
45 Aqua (0,215,255)
46 Green (0,255,0)
47 DarkSpringGreen (0,255,95)
48 SpringGreen (0,255,135)
49 LightSpringGreen (0,255,175)
50 BrightTurquoise (0,255,215)
51 Cyan (0,255,255)
52 Rosewood (95,0,0)
53 PompadourMagenta (95,0,95)
54 PigmentIndigo (95,0,135)
55 DarkPurple (95,0,175)
56 ElectricIndigo (95,0,215)
57 ElectricPurple (95,0,255)
58 VerdunGreen (95,95,0)
59 ScorpionOlive (95,95,95)
60 Lilac (95,95,135)
61 ScampiIndigo (95,95,175)
62 Indigo (95,95,215)
63 DarkCornflowerBlue (95,95,255)
64 DarkLimeade (95,135,0)
65 GladeGreen (95,135,95)
66 JuniperGreen (95,135,135)
67 HippieBlue (95,135,175)
68 HavelockBlue (95,135,215)
69 CornflowerBlue (95,135,255)
70 Limeade (95,175,0)
71 FernGreen (95,175,95)
72 SilverTree (95,175,135)
73 Tradewind (95,175,175)
74 ShakespeareBlue (95,175,215)
75 DarkMalibuBlue (95,175,255)
76 DarkBrightGreen (95,215,0)
77 DarkPastelGreen (95,215,95)
78 PastelGreen (95,215,135)
79 DownyTeal (95,215,175)
80 Viking (95,215,215)
81 MalibuBlue (95,215,255)
82 BrightGreen (95,255,0)
83 DarkScreaminGreen (95,255,95)
84 ScreaminGreen (95,255,135)
85 DarkAquamarine (95,255,175)
86 Aquamarine (95,255,215)
87 LightAquamarine (95,255,255)
88 Maroon (135,0,0)
89 DarkFreshEggplant (135,0,95)
90 LightFreshEggplant (135,0,135)
91 Purple (135,0,175)
92 ElectricViolet (135,0,215)
93 LightElectricViolet (135,0,255)
94 Brown (135,95,0)
95 CopperRose (135,95,95)
96 StrikemasterPurple (135,95,135)
97 DelugePurple (135,95,175)
98 DarkMediumPurple (135,95,215)
99 DarkHeliotropePurple (135,95,255)
100 Olive (135,135,0)
101 ClayCreekOlive (135,135,95)
102 DarkGray (135,135,135)
103 WildBlueYonder (135,135,175)
104 ChetwodeBlue (135,135,215)
105 SlateBlue (135,135,255)
106 LightLimeade (135,175,0)
107 ChelseaCucumber (135,175,95)
108 BayLeaf (135,175,135)
109 GulfStream (135,175,175)
110 PoloBlue (135,175,215)
111 LightMalibuBlue (135,175,255)
112 Pistachio (135,215,0)
113 LightPastelGreen (135,215,95)
114 DarkFeijoaGreen (135,215,135)
115 VistaBlue (135,215,175)
116 Bermuda (135,215,215)
117 DarkAnakiwaBlue (135,215,255)
118 ChartreuseGreen (135,255,0)
119 LightScreaminGreen (135,255,95)
120 DarkMintGreen (135,255,135)
121 MintGreen (135,255,175)
122 LighterAquamarine (135,255,215)
123 AnakiwaBlue (135,255,255)
124 BrightRed (175,0,0)
125 DarkFlirt (175,0,95)
126 Flirt (175,0,135)
127 LightFlirt (175,0,175)
128 DarkViolet (175,0,215)
129 BrightElectricViolet (175,0,255)
130 RoseofSharonOrange (175,95,0)
131 MatrixPink (175,95,95)
132 TapestryPink (175,95,135)
133 FuchsiaPink (175,95,175)
134 MediumPurple (175,95,215)
135 Heliotrope (175,95,255)
136 PirateGold (175,135,0)
137 MuesliOrange (175,135,95)
138 PharlapPink (175,135,135)
139 Bouquet (175,135,175)
140 Lavender (175,135,215)
141 LightHeliotrope (175,135,255)
142 BuddhaGold (175,175,0)
143 OliveGreen (175,175,95)
144 HillaryOlive (175,175,135)
145 SilverChalice (175,175,175)
146 WistfulLilac (175,175,215)
147 MelroseLilac (175,175,255)
148 RioGrandeGreen (175,215,0)
149 ConiferGreen (175,215,95)
150 Feijoa (175,215,135)
151 PixieGreen (175,215,175)
152 JungleMist (175,215,215)
153 LightAnakiwaBlue (175,215,255)
154 Lime (175,255,0)
155 GreenYellow (175,255,95)
156 LightMintGreen (175,255,135)
157 Celadon (175,255,175)
158 AeroBlue (175,255,215)
159 FrenchPassLightBlue (175,255,255)
160 GuardsmanRed (215,0,0)
161 RazzmatazzCerise (215,0,95)
162 MediumVioletRed (215,0,135)
163 HollywoodCerise (215,0,175)
164 DarkPurplePizzazz (215,0,215)
165 BrighterElectricViolet (215,0,255)
166 TennOrange (215,95,0)
167 RomanOrange (215,95,95)
168 CranberryPink (215,95,135)
169 HopbushPink (215,95,175)
170 Orchid (215,95,215)
171 LighterHeliotrope (215,95,255)
172 MangoTango (215,135,0)
173 Copperfield (215,135,95)
174 SeaPink (215,135,135)
175 CanCanPink (215,135,175)
176 LightOrchid (215,135,215)
177 BrightHeliotrope (215,135,255)
178 DarkCorn (215,175,0)
179 DarkTachaOrange (215,175,95)
180 TanBeige (215,175,135)
181 ClamShell (215,175,175)
182 ThistlePink (215,175,215)
183 Mauve (215,175,255)
184 Corn (215,215,0)
185 TachaOrange (215,215,95)
186 DecoOrange (215,215,135)
187 PaleGoldenrod (215,215,175)
188 AltoBeige (215,215,215)
189 FogPink (215,215,255)
190 ChartreuseYellow (215,255,0)
191 Canary (215,255,95)
192 Honeysuckle (215,255,135)
193 ReefPaleYellow (215,255,175)
194 SnowyMint (215,255,215)
195 OysterBay (215,255,255)
196 Red (255,0,0)
197 DarkRose (255,0,95)
198 Rose (255,0,135)
199 LightHollywoodCerise (255,0,175)
200 PurplePizzazz (255,0,215)
201 Fuchsia (255,0,255)
202 BlazeOrange (255,95,0)
203 BittersweetOrange (255,95,95)
204 WildWatermelon (255,95,135)
205 DarkHotPink (255,95,175)
206 HotPink (255,95,215)
207 PinkFlamingo (255,95,255)
208 FlushOrange (255,135,0)
209 Salmon (255,135,95)
210 VividTangerine (255,135,135)
211 PinkSalmon (255,135,175)
212 DarkLavenderRose (255,135,215)
213 BlushPink (255,135,255)
214 YellowSea (255,175,0)
215 TexasRose (255,175,95)
216 Tacao (255,175,135)
217 Sundown (255,175,175)
218 CottonCandy (255,175,215)
219 LavenderRose (255,175,255)
220 Gold (255,215,0)
221 Dandelion (255,215,95)
222 GrandisCaramel (255,215,135)
223 Caramel (255,215,175)
224 CosmosSalmon (255,215,215)
225 PinkLace (255,215,255)
226 Yellow (255,255,0)
227 LaserLemon (255,255,95)
228 DollyYellow (255,255,135)
229 PortafinoYellow (255,255,175)
230 Cumulus (255,255,215)
231 White (255,255,255)
232 DarkCodGray (8,8,8)
233 CodGray (18,18,18)
234 LightCodGray (28,28,28)
235 DarkMineShaft (38,38,38)
236 MineShaft (48,48,48)
237 LightMineShaft (58,58,58)
238 DarkTundora (68,68,68)
239 Tundora (78,78,78)
240 ScorpionGray (88,88,88)
241 DarkDoveGray (98,98,98)
242 DoveGray (108,108,108)
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new() | t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
            // Lay down nodes for the most-shared tests before the others (see the standalone sketch after this function)
            while let Some((max_info, max_intersection)) = test_map.values()
                .map(|info| (info, &hash_eq_info.dependents & &info.dependents))
                .filter(|&(_, ref intersection)| !intersection.is_empty())
                .max_by_key(|&(_, ref intersection)| intersection.len()) {
                let destination_id = layout_map.entry(max_info.id)
                    .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                    .node_id;
                hash_eq_info.dependents.retain(|x| !max_intersection.contains(x));
                hash_eq_destinations.push(destination_id.into());
            }
            // Add the HashEq node to the map and store any remaining statements for the beta network
            hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
            for statement_id in hash_eq_info.dependents {
                statement_memories.insert(statement_id, hash_eq_id.into());
            }
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
                // Sort the remaining tests by laid-out vs not.
                // TODO: sort by dependents.size, too. put that at the front
                tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
                println!("Layout: {:?}", layout_map);
                println!("Sorted: {:?}", tests);
                // Again, in order of most shared to least, lay down nodes
                // TODO: when the closure is cloneable, fix this to use a cartesian product
                let output = tests.iter().enumerate().tuple_combinations()
                    .filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
                    .map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
                    .filter(|&(_, _, _, ref shared)| !shared.is_empty())
                    .max_by_key(|&(_, _, _, ref shared)| shared.len());
                if let Some((pos1, id1, id2, shared)) = output {
                    let alpha2_id = layout_map.entry(id2)
                        .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                        .node_id;
                    layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
                    tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
                let store = !info.dependents.is_empty();
                assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
                alpha_network.push(AlphaNode{id, test, store, dest});
                for statement_id in info.dependents {
                    statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
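    // [Editor's sketch — not part of the original file.] The greedy pass above
    // repeatedly picks the test whose dependents share the most statements with
    // the current entry point, then peels those statements off. The same
    // heuristic in isolation, on plain integer sets (hypothetical helper):
    #[allow(dead_code)]
    fn demo_greedy_layout(mut entry: std::collections::HashSet<u32>,
                          tests: &[std::collections::HashSet<u32>]) -> Vec<usize> {
        use std::collections::HashSet;
        let mut order = Vec::new();
        while let Some((best, shared)) = tests.iter().enumerate()
            .map(|(i, deps)| (i, entry.intersection(deps).cloned().collect::<HashSet<u32>>()))
            .filter(|&(_, ref shared)| !shared.is_empty())
            .max_by_key(|&(_, ref shared)| shared.len()) {
            // Peeling the shared statements guarantees progress, so the loop ends.
            entry.retain(|x| !shared.contains(x));
            order.push(best);
        }
        order
    }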
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
                .or_insert_with(Default::default).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
    // 5. If stack empty, select next m2 for m1. If no m2, add rule ids as destination nodes. If no more m1 rules, remove from map.
    // (A standalone sketch of steps 1-3 follows this function.)
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
                .filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
                .map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
                .filter(|&(_, ref intersect)| !intersect.is_empty())
                .max_by_key(|&(_, ref intersect)| intersect.len()) {
                // Join the two alpha memories with a beta node
                let beta_id = beta_ids.next();
                most_dep.retain(|x| !intersect.contains(x));
                Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
                {
                    let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
                    intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
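    // [Editor's sketch — not part of the original file.] Steps 1-3 of the plan
    // above in isolation: pop the memory with the most dependent rules, then
    // repeatedly join it with whichever remaining memory shares the most rules,
    // peeling the shared rules off both sides. Memories are plain usize keys
    // here (hypothetical helper).
    #[allow(dead_code)]
    fn demo_beta_pairing(mut mems: Vec<(usize, std::collections::HashSet<u32>)>)
                         -> Vec<(usize, usize)> {
        use std::collections::HashSet;
        let mut joins = Vec::new();
        mems.sort_by_key(|&(_, ref rules)| rules.len());
        while let Some((m1, mut r1)) = mems.pop() {
            if r1.is_empty() { break; }
            while let Some((pos, shared)) = mems.iter().enumerate()
                .map(|(pos, &(_, ref r2))| {
                    (pos, r1.intersection(r2).cloned().collect::<HashSet<u32>>())
                })
                .filter(|&(_, ref shared)| !shared.is_empty())
                .max_by_key(|&(_, ref shared)| shared.len()) {
                // Each join consumes the shared rules on both sides.
                r1.retain(|x| !shared.contains(x));
                mems[pos].1.retain(|x| !shared.contains(x));
                joins.push((m1, mems[pos].0));
            }
            mems.sort_by_key(|&(_, ref rules)| rules.len());
        }
        joins
    }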
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
            _ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
        if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
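// [Editor's sketch — not part of the original file.] FactStore::insert above is
// a hash-consing step: inserting an equal value hands back the Rc already in
// the store, so equal facts share one allocation. Standalone illustration on
// plain std types:
#[allow(dead_code)]
fn demo_hash_consing() {
    use std::collections::HashSet;
    use std::rc::Rc;
    let mut store: HashSet<Rc<String>> = HashSet::new();
    let a = Rc::new("fact".to_string());
    store.insert(a.clone());
    // A second, equal value does not displace the first entry...
    let b = Rc::new("fact".to_string());
    let interned = if !store.insert(b.clone()) {
        store.get(&b).unwrap().clone()
    } else {
        b
    };
    // ...so both handles point at the same allocation.
    assert!(Rc::ptr_eq(&a, &interned));
}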
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
} | }
}
pub struct KnowledgeBase<T: ReteIntrospection> { | random_line_split |
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) | else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
            // Lay down nodes for the most-shared tests before the others
            while let Some((max_info, max_intersection)) = test_map.values()
                .map(|info| (info, &hash_eq_info.dependents & &info.dependents))
                .filter(|&(_, ref intersection)| !intersection.is_empty())
                .max_by_key(|&(_, ref intersection)| intersection.len()) {
                let destination_id = layout_map.entry(max_info.id)
                    .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                    .node_id;
                hash_eq_info.dependents.retain(|x| !max_intersection.contains(x));
                hash_eq_destinations.push(destination_id.into());
            }
            // Add the HashEq node to the map and store any remaining statements for the beta network
            hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
            for statement_id in hash_eq_info.dependents {
                statement_memories.insert(statement_id, hash_eq_id.into());
            }
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
                // Sort the remaining tests by laid-out vs not.
                // TODO: sort by dependents.size, too. put that at the front
                tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
                println!("Layout: {:?}", layout_map);
                println!("Sorted: {:?}", tests);
                // Again, in order of most shared to least, lay down nodes
                // TODO: when the closure is cloneable, fix this to use a cartesian product
                let output = tests.iter().enumerate().tuple_combinations()
                    .filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
                    .map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
                    .filter(|&(_, _, _, ref shared)| !shared.is_empty())
                    .max_by_key(|&(_, _, _, ref shared)| shared.len());
                if let Some((pos1, id1, id2, shared)) = output {
                    let alpha2_id = layout_map.entry(id2)
                        .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                        .node_id;
                    layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
                    tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
                let store = !info.dependents.is_empty();
                assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
                alpha_network.push(AlphaNode{id, test, store, dest});
                for statement_id in info.dependents {
                    statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
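    // [Editor's sketch — not part of the original file.] The comparator elided
    // from the sort above orders by a primary key and breaks ties with a
    // secondary one via Ordering::then; the same shape on plain pairs:
    #[allow(dead_code)]
    fn demo_two_key_sort(v: &mut Vec<(usize, usize)>) {
        v.sort_by(|a, b| a.0.cmp(&b.0).then(a.1.cmp(&b.1)));
    }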
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
                .or_insert_with(Default::default).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
                .filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
                .map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
                .filter(|&(_, ref intersect)| !intersect.is_empty())
                .max_by_key(|&(_, ref intersect)| intersect.len()) {
                // Join the two alpha memories with a beta node
                let beta_id = beta_ids.next();
                most_dep.retain(|x| !intersect.contains(x));
                Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
                {
                    let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
                    intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
            _ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
        if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} | conditional_block |
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
            // Lay down nodes for the most-shared tests before the others
            while let Some((max_info, max_intersection)) = test_map.values()
                .map(|info| (info, &hash_eq_info.dependents & &info.dependents))
                .filter(|&(_, ref intersection)| !intersection.is_empty())
                .max_by_key(|&(_, ref intersection)| intersection.len()) {
                let destination_id = layout_map.entry(max_info.id)
                    .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                    .node_id;
                hash_eq_info.dependents.retain(|x| !max_intersection.contains(x));
                hash_eq_destinations.push(destination_id.into());
            }
            // Add the HashEq node to the map and store any remaining statements for the beta network
            hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
            for statement_id in hash_eq_info.dependents {
                statement_memories.insert(statement_id, hash_eq_id.into());
            }
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
                // Sort the remaining tests by laid-out vs not.
                // TODO: sort by dependents.size, too. put that at the front
                tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
                println!("Layout: {:?}", layout_map);
                println!("Sorted: {:?}", tests);
                // Again, in order of most shared to least, lay down nodes
                // TODO: when the closure is cloneable, fix this to use a cartesian product
                let output = tests.iter().enumerate().tuple_combinations()
                    .filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
                    .map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
                    .filter(|&(_, _, _, ref shared)| !shared.is_empty())
                    .max_by_key(|&(_, _, _, ref shared)| shared.len());
                if let Some((pos1, id1, id2, shared)) = output {
                    let alpha2_id = layout_map.entry(id2)
                        .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                        .node_id;
                    layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
                    tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
                let store = !info.dependents.is_empty();
                assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
                alpha_network.push(AlphaNode{id, test, store, dest});
                for statement_id in info.dependents {
                    statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
                .or_insert_with(Default::default).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
                .filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
                .map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
                .filter(|&(_, ref intersect)| !intersect.is_empty())
                .max_by_key(|&(_, ref intersect)| intersect.len()) {
                // Join the two alpha memories with a beta node
                let beta_id = beta_ids.next();
                most_dep.retain(|x| !intersect.contains(x));
                Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
                {
                    let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
                    intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
            _ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn | <I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
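// [Editor's sketch — not part of the original file.] The method whose name is
// elided above is an entry-API "multimap" insert; the same pattern on plain
// std types:
#[allow(dead_code)]
fn demo_multimap_insert() {
    use std::collections::{HashMap, HashSet};
    let mut mem: HashMap<u32, HashSet<&'static str>> = HashMap::new();
    mem.entry(1).or_insert_with(HashSet::new).insert("fact-a");
    mem.entry(1).or_insert_with(HashSet::new).insert("fact-b");
    // Both values land in the set created on first access for key 1.
    assert_eq!(mem[&1].len(), 2);
}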
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
        if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| insert | identifier_name |
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
            // Lay down nodes for the most-shared tests before the others
            while let Some((max_info, max_intersection)) = test_map.values()
                .map(|info| (info, &hash_eq_info.dependents & &info.dependents))
                .filter(|&(_, ref intersection)| !intersection.is_empty())
                .max_by_key(|&(_, ref intersection)| intersection.len()) {
                let destination_id = layout_map.entry(max_info.id)
                    .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                    .node_id;
                hash_eq_info.dependents.retain(|x| !max_intersection.contains(x));
                hash_eq_destinations.push(destination_id.into());
            }
            // Add the HashEq node to the map and store any remaining statements for the beta network
            hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
            for statement_id in hash_eq_info.dependents {
                statement_memories.insert(statement_id, hash_eq_id.into());
            }
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
                // Sort the remaining tests by laid-out vs not.
                // TODO: sort by dependents.size, too. put that at the front
                tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
                println!("Layout: {:?}", layout_map);
                println!("Sorted: {:?}", tests);
                // Again, in order of most shared to least, lay down nodes
                // TODO: when the closure is cloneable, fix this to use a cartesian product
                let output = tests.iter().enumerate().tuple_combinations()
                    .filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
                    .map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
                    .filter(|&(_, _, _, ref shared)| !shared.is_empty())
                    .max_by_key(|&(_, _, _, ref shared)| shared.len());
                if let Some((pos1, id1, id2, shared)) = output {
                    let alpha2_id = layout_map.entry(id2)
                        .or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
                        .node_id;
                    layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
                    tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
                let store = !info.dependents.is_empty();
                assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
                alpha_network.push(AlphaNode{id, test, store, dest});
                for statement_id in info.dependents {
                    statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
                .or_insert_with(Default::default).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
                .filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
                .map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
                .filter(|&(_, ref intersect)| !intersect.is_empty())
                .max_by_key(|&(_, ref intersect)| intersect.len()) {
                // Join the two alpha memories with a beta node
                let beta_id = beta_ids.next();
                most_dep.retain(|x| !intersect.contains(x));
                Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
                {
                    let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
                    intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
            _ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
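// [Editor's note — not part of the original file.] Idiomatic Rust usually
// writes these conversions as From impls, which yield the matching Into for
// free through the blanket impl. The equivalent shape on hypothetical
// stand-in types:
#[allow(dead_code)]
mod demo_from {
    struct DemoAlphaId(usize);
    enum DemoDestination { Alpha(DemoAlphaId) }
    impl From<DemoAlphaId> for DemoDestination {
        fn from(id: DemoAlphaId) -> Self {
            DemoDestination::Alpha(id)
        }
    }
    // `DemoAlphaId` now also satisfies `Into<DemoDestination>` automatically.
}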
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> |
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
} | identifier_body |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
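            // [Editor's comment] Lifetime laundering: this forges a reference
            // with an unconstrained lifetime to the stack-local cache. It is
            // sound only because `f` returns before `hashlife_cache` is
            // dropped and the `for<'b>` bound keeps the reference from
            // escaping `f`.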
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
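    // [Editor's sketch — not part of the original file.] Minimal usage of the
    // scoped-closure convention above: every Block handed out inside the
    // closure is tied to the cache's lifetime, so nothing can escape it
    // (assumes 4 >= LG_LEAF_SIZE, which holds for the 16-bit 4x4 leaves).
    #[allow(dead_code)]
    fn demo_with_new() {
        Hashlife::with_new(|hl| {
            let blank = hl.blank(4);
            assert!(blank.is_blank());
        });
    }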
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
    /// Creates a node with `elems` as corners. Panics if the sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
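    // [Editor's sketch — not part of the original file.] Size bookkeeping for
    // big_step: a node of lg_size n+1 yields the centred block of lg_size n,
    // advanced 2^(n-1) generations (assumes LG_LEAF_SIZE is 2, i.e. 4x4
    // leaves, as the 16-bit leaf encoding suggests).
    #[allow(dead_code)]
    fn demo_big_step_sizes() {
        Hashlife::with_new(|hl| {
            let node = hl.blank(4).unwrap_node(); // a 16x16 node
            let centre = hl.big_step(node);       // its 8x8 centre, 4 generations on
            assert_eq!(centre.lg_size(), 3);
        });
    }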
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
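    // [Editor's sketch — not part of the original file.] The blank cache above
    // is a grow-on-demand memo: each missing depth is built once from the
    // previous level and kept for reuse. The same shape on plain data:
    #[allow(dead_code)]
    fn demo_grow_cache(cache: &mut Vec<u64>, depth: usize) -> u64 {
        if cache.is_empty() {
            cache.push(1); // depth 0 seed, like the single leaf block
        }
        while cache.len() <= depth {
            let next = cache.last().unwrap() * 4; // one level up quadruples the area
            cache.push(next);
        }
        cache[depth]
    }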
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else |
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
} | conditional_block |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
    /// Creates a node with `elems` as corners. Panics if the sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
| /// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block of size `2 ** lg_size` with all cells set randomly.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
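// Hedged addition: exercises the lazily grown blank cache described in
// `raw_blank`: a larger blank than any cached so far extends the cache, and
// later smaller requests are pure lookups (assumes 6 and 7 are valid sizes).
#[test]
fn test_blank_cache_growth() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(7).lg_size(), 7);
assert_eq!(hl.raw_blank(6).lg_size(), 6);
});
}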
} | /// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the | random_line_split |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For an explanation of why
/// this calling convention is used, see `CABlockCache::with_new`.
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates a leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block of size `2 ** lg_size` with all cells set randomly.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf |
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
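// Hedged addition: `leaf` should wrap a raw leaf unchanged and report the leaf
// size (raw `Block` here is `crate::block::Block`, as imported above).
#[test]
fn test_leaf_roundtrip() {
Hashlife::with_new(|hl| {
let leaf = hl.leaf(0);
assert_eq!(leaf.lg_size(), LG_LEAF_SIZE);
assert_eq!(leaf.to_raw(), Block::Leaf(0));
});
}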
}
| {
self.destruct().unwrap_err()
} | identifier_body |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For an explanation of why
/// this calling convention is used, see `CABlockCache::with_new`.
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates a leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block of size `2 ** lg_size` with all cells set randomly.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
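// Hedged addition: building a raw node from four blank leaves echoes the
// corners back through the `corners` accessor used in `test_blank0`.
#[test]
fn test_raw_node_corners() {
Hashlife::with_new(|hl| {
let blank = hl.raw_blank(LG_LEAF_SIZE);
let node = hl.raw_node([[blank; 2]; 2]);
assert_eq!(node.corners(), &[[blank; 2]; 2]);
});
}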
}
| fmt | identifier_name |
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> | .find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
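// Hedged usage sketch for `desc_sets` (names are illustrative, not part of
// this crate): one descriptor set per frame in flight, each with its own
// uniform buffer but sharing one sampled image and sampler. Only the shape of
// `values` matters; every row must have the same counts, as asserted above.
#[allow(dead_code)]
fn desc_sets_example<B: Backend>(
device: &B::Device,
per_frame_ubos: &[B::Buffer],
texture_view: &B::ImageView,
sampler: &B::Sampler,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
let values = per_frame_ubos
.iter()
.map(|ubo| (vec![ubo], vec![texture_view], vec![sampler]))
.collect::<Vec<_>>();
desc_sets::<B>(device, values)
}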
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
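// Hedged sketch of the `intermediate` flag: an offscreen pass whose color
// target ends in ShaderReadOnlyOptimal (so a later pass can sample it) versus
// a final pass that ends in Present. No depth buffer in either, for brevity.
#[allow(dead_code)]
fn render_pass_pair_example<B: Backend>(
device: &B::Device,
surface_color_format: Format,
) -> (B::RenderPass, B::RenderPass) {
let offscreen = render_pass::<B>(device, surface_color_format, None, true);
let present = render_pass::<B>(device, surface_color_format, None, false);
(offscreen, present)
}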
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
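// Hedged usage sketch for `pipeline`: a position/normal/uv vertex layout.
// `attribute_sizes` of [3, 3, 2] becomes Rgb32Sfloat, Rgb32Sfloat, Rg32Sfloat
// at packed offsets, per the match above. The SPIR-V byte slices are assumed
// to come from the caller's shader build step.
#[allow(dead_code)]
fn mesh_pipeline_example<B: SupportedBackend>(
device: &B::Device,
render_pass: &B::RenderPass,
vs_spirv: &[u8],
fs_spirv: &[u8],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
pipeline::<B>(device, None, 0, vs_spirv, fs_spirv, render_pass, None, &[3, 3, 2])
}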
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
| {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter() | identifier_body |
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
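// Hedged sketch of calling `init` (assumes a winit-style window and a concrete
// gfx-hal backend crate such as `gfx_backend_vulkan`, both outside this file).
// Everything the renderer needs at startup comes from this single call:
//
// let (instance, surface, color_format, adapter, device, queue_group, cmd_pool) =
//     init::<gfx_backend_vulkan::Backend>(&window, "demo", 1)
//         .expect("graphics initialization failed");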
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
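// Hedged note: writes always start at binding 0 in the same ubo/image/sampler
// order that `desc_sets` declared, so refreshing one set after (say) a texture
// swap is a single-row call along these lines:
//
// write_desc_sets::<B>(
//     device,
//     vec![&mut sets[frame_index]],
//     vec![(vec![&camera_ubo], vec![&new_texture_view], vec![&sampler])],
// );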
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass, | mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
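// Hedged sketch: this is typically re-run on window resize or after a failed
// acquire; the returned FramebufferAttachment is exactly what
// `acquire_framebuffer` needs every frame:
//
// let fb_attachment = reconfigure_swapchain::<B>(
//     &mut surface, &adapter, &device, color_format, &mut surface_extent,
// );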
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
} | },
);
pipeline_desc.blender.targets.push(ColorBlendDesc { | random_line_split |
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else | ;
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
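// Hedged sketch of the per-frame flow these helpers add up to: try to acquire,
// and on failure reconfigure the swapchain and skip the frame (the usual
// gfx-hal response to an out-of-date swapchain). Cloning `fb_attachment` each
// frame is an assumption about how the caller keeps it around.
//
// match acquire_framebuffer::<B>(&device, &mut surface, &surface_extent,
//                                &render_pass, fb_attachment.clone()) {
//     Ok((framebuffer, image, viewport)) => { /* record, submit, present */ }
//     Err(()) => { /* reconfigure_swapchain before the next frame */ }
// }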
| { vec![] } | conditional_block |
easy.rs | use super::*;
use crate::utils::over;
pub fn | <B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
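// Hypothetical usage of `desc_sets` (the buffer, view and sampler variables
// are placeholders, and `backend` stands for whichever gfx-hal backend is
// compiled in): two sets sharing one layout, each binding one uniform
// buffer, one sampled image and one sampler.
// let (layout, pool, sets) = desc_sets::<backend::Backend>(
//     &device,
//     vec![
//         (vec![&ubo_a], vec![&view_a], vec![&sampler]),
//         (vec![&ubo_b], vec![&view_b], vec![&sampler]),
//     ],
// );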
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
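// Note: writing everything through a single DescriptorSetWrite at binding 0
// leans on Vulkan-style update semantics, where descriptors that overflow a
// binding spill into the consecutive ones; that matches the sequential
// ubo/image/sampler bindings built in `desc_sets` above.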
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
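// Hypothetical call (formats are placeholders): an off-screen pass that a
// later pass samples from would set `intermediate` to true, so the color
// attachment ends in ShaderReadOnlyOptimal rather than Present.
// let offscreen_pass = render_pass::<backend::Backend>(
//     &device, Format::Rgba8Srgb, Some(Format::D32Sfloat), true);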
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
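// Worked example (illustrative only): attribute_sizes = [3, 3, 2] yields
// stride = (3 + 3 + 2) * 4 = 32 bytes, and the loop above emits locations
// 0, 1, 2 as Rgb32Sfloat, Rgb32Sfloat, Rg32Sfloat at offsets 0, 12 and 24.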
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
| init | identifier_name |
client.rs | // #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future, | // str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
/// Message to send. Set it to '-' to send a single message read from stdin;
/// leave it blank for an interactive console loop that sends each typed line.
#[structopt(short = "m", default_value = "")]
msg: String,
}
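// Hypothetical invocations (flag names come from the struct above):
// client -u wss://localhost:443/ws              # interactive console loop
// client -u wss://localhost:443/ws -m hello     # send one message, then exit
// client -m - < greeting.txt                    # send one line read from stdin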
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
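// Note: the verifier above accepts any certificate, so the TLS session is
// encrypted but the server is not authenticated. That is fine for poking a
// local self-signed endpoint, but should not be used against real servers.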
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" {
addr.do_send(ClientCommand(read_stdin()));
sys.stop();
} else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2, ' ').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
struct WsClient<T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
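// A sketch of the timeout check the comment above asks for; `last_pong` and
// CLIENT_TIMEOUT are hypothetical (they do not exist in this file), and
// Instant would come from std::time:
// fn hb(&self, ctx: &mut Context<Self>) {
//     ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
//         if Instant::now().duration_since(act.last_pong) > CLIENT_TIMEOUT {
//             info!("server heartbeat timed out, disconnecting");
//             ctx.stop();
//             return;
//         }
//         act.0.write(Message::Ping(String::new())).unwrap();
//         act.hb(ctx);
//     });
// }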
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T:'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {} | };
use std::{
io, | random_line_split |
client.rs | // #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future,
};
use std::{
io,
// str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
/// Message to send. Set it to '-' to send a single message read from stdin;
/// leave it blank for an interactive console loop that sends each typed line.
#[structopt(short = "m", default_value = "")]
msg: String,
}
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" {
addr.do_send(ClientCommand(read_stdin()));
sys.stop();
} else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2, ' ').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
struct | <T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T:'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {}
| WsClient | identifier_name |
client.rs | // #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future,
};
use std::{
io,
// str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
/// Message to send. Set it to '-' to send a single message read from stdin;
/// leave it blank for an interactive console loop that sends each typed line.
#[structopt(short = "m", default_value = "")]
msg: String,
}
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" | else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2,'').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
struct WsClient<T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T:'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {}
| {
addr.do_send(ClientCommand(read_stdin()));
sys.stop();
} | conditional_block |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum GameInputEffect {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
}
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit {..} => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape),.. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode : Some(Keycode::Z),.. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode : Some(Keycode::X),.. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode : Some(Keycode::C),.. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode : Some(Keycode::V),.. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode : Some(Keycode::B),.. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode : Some(Keycode::Z),.. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode : Some(Keycode::X),.. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode : Some(Keycode::C),.. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode : Some(Keycode::V),.. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode : Some(Keycode::B),.. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode : Some(Keycode::Space),.. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx : 0,.. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx : 1,.. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx : 3,.. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx : 2,.. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx : 4,.. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx : 0,.. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx : 1,.. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx : 3,.. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx : 2,.. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx : 4,.. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Up,.. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Down,.. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for part of each frame
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
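// A sketch for the vsync TODO above; rust-sdl2 exposes a swap-interval
// setter on the video subsystem, though its exact signature varies by
// version, so treat this as an assumption:
// if let Some(FrameLimit::Vsync) = FRAME_LIMIT {
//     let _ = video_subsys.gl_set_swap_interval(1);
// }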
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
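// Worked example of the smoothing above: if the frame clock has drifted to
// 1000 ms while the audio playhead reports 990 ms, song_time_ms becomes
// (1000 + 990) / 2 = 995 ms, halving the error instead of snapping and
// causing a visible jump in note positions.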
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (), | .map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
} | }
}
});
playthrough.update_time(song_time_ms) | random_line_split |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum | {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
}
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit {..} => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape),.. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode : Some(Keycode::Z),.. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode : Some(Keycode::X),.. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode : Some(Keycode::C),.. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode : Some(Keycode::V),.. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode : Some(Keycode::B),.. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode : Some(Keycode::Z),.. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode : Some(Keycode::X),.. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode : Some(Keycode::C),.. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode : Some(Keycode::V),.. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode : Some(Keycode::B),.. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode : Some(Keycode::Space),.. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx : 0,.. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx : 1,.. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx : 3,.. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx : 2,.. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx : 4,.. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx : 0,.. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx : 1,.. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx : 3,.. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx : 2,.. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx : 4,.. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Up,.. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Down,.. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for part of each frame
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
playthrough.update_time(song_time_ms)
.map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
}
| GameInputEffect | identifier_name |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum GameInputEffect {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> |
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit { .. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape), .. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Z), .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode: Some(Keycode::X), .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode: Some(Keycode::C), .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode: Some(Keycode::V), .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode: Some(Keycode::B), .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode: Some(Keycode::Z), .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode: Some(Keycode::X), .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode: Some(Keycode::C), .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode: Some(Keycode::V), .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode: Some(Keycode::B), .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode: Some(Keycode::Space), .. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx: 0, .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx: 1, .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx: 3, .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx: 2, .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx: 4, .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx: 0, .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx: 1, .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx: 3, .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx: 2, .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx: 4, .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx: 0, state: sdl2::joystick::HatState::Up, .. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx: 0, state: sdl2::joystick::HatState::Down, .. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for part of each frame (Cap) or rely on vsync (Vsync)
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
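// A sketch toward the sleep-interval TODO above (illustrative, not wired into
// the loop yet): sleep only for whatever is left of the frame budget, measured
// from the start of the frame, instead of a full fixed interval.
#[allow(dead_code)]
fn limit_frame(frame_start: Instant, cap: u32) {
    // nanoseconds available per frame at the requested cap
    let budget = Duration::new(0, 1_000_000_000u32 / cap);
    let elapsed = frame_start.elapsed();
    if elapsed < budget {
        ::std::thread::sleep(budget - elapsed);
    }
}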
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
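// The system-clock accumulator above drifts relative to the audio clock;
// averaging the two whenever the playhead advances pulls song_time_ms back
// toward the audio position without an audible jump (see the linked thread).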
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
playthrough.update_time(song_time_ms)
.map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
}
| {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
} | identifier_body |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let health_check = healthcheck(self.host.clone());
Ok((sink, health_check))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
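// For illustration, one encoded event contributes two newline-terminated
// NDJSON lines to the bulk body (values here are hypothetical):
//   {"index":{"_index":"vector-2019.05.01","_type":"_doc","_id":"42"}}
//   {"message":"raw log line","timestamp":"2019-05-01T00:00:00Z","my_id":"42"}
// The trailing '\n' after each line is required by the _bulk endpoint.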
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else |
})
}
}
| {
Err(format!("Unexpected status: {}", response.status()))
} | conditional_block |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let health_check = healthcheck(self.host.clone());
Ok((sink, health_check))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
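// A minimal configuration sketch for this sink (TOML; the section name and
// values are illustrative, only the `type` tag and the field names come from
// the struct above):
//   [sinks.es]
//   type = "elasticsearch"
//   host = "http://localhost:9200"
//   index = "vector-%Y.%m.%d"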
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log() | maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
} | .insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
| random_line_split |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let health_check = healthcheck(self.host.clone());
Ok((sink, health_check))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
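// For illustration: with id_key = Some("user_id") and an event carrying a
// "user_id" value of "42", the action object gains {"_id": "42"}; when the key
// is unset or missing from the event, no _id is written and Elasticsearch
// assigns one automatically (see the unit tests below).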
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() |
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
}
| {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
} | identifier_body |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let health_check = healthcheck(self.host.clone());
Ok((sink, health_check))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
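// Defaults recap, read off the unwrap_or calls above: gzip enabled, 10 MiB
// batches flushed at least every 1 s, at most 5 requests in flight, 5 requests
// per second, a 60 s request timeout, and effectively unlimited retries at a
// fixed 1 s backoff.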
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure writes are all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn | (host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
}
| flush | identifier_name |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
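// A construction sketch for encoder-style use (all values illustrative):
//   let opts = DetectionOptions {
//       ignore_flashes: true,
//       min_scenecut_distance: Some(12),
//       max_scenecut_distance: Some(250),
//       progress_callback: Some(Box::new(|frames, keyframes| {
//           eprintln!("{} frames analyzed, {} keyframes found", frames, keyframes);
//       })),
//       ..DetectionOptions::default()
//   };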
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace()); | let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
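// A minimal usage sketch. Assumptions: the `y4m` crate exposes a `decode`
// constructor taking a reader, and the local `Pixel` trait is implemented for
// `u8` (use `u16` instead for high-bit-depth input).
#[allow(dead_code)]
fn example_detect(path: &str) -> Vec<usize> {
    let file = std::fs::File::open(path).expect("could not open input file");
    let mut dec = ::y4m::decode(file).expect("invalid y4m header");
    detect_scene_changes::<_, u8>(&mut dec, DetectionOptions::default())
}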
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
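// For example, with 4:2:0 subsampling a 1920x1080 luma plane pairs with
// 960x540 chroma planes (decimation (1, 1)), while 4:2:2 keeps full height at
// half width (960x1080) and 4:4:4 is not subsampled at all.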
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(&current_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
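// Worked example: with frames A A A B B A inside the lookahead window,
// comparing the first A against the final A reports no cut, so every frame in
// between (the B flash included) is inserted into excluded_frames.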
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
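// Worked example (8-bit input, threshold 12, luma only): two 64x64 planes give
// len = 4096, so a cut is declared once the summed absolute difference reaches
// 12 * 4096 = 49152, i.e. an average delta of 12 per pixel.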
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
} | let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new(); | random_line_split |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 | else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
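// Worked example of the threshold scaling above (illustrative, not from the
// original file); the u8 arithmetic cannot overflow for these depths:
//   8-bit:  12 * 8  / 8 = 12
//   10-bit: 12 * 10 / 8 = 15  (12 * 10 = 120 <= u8::MAX)
//   12-bit: 12 * 12 / 8 = 18  (12 * 12 = 144 <= u8::MAX)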
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(&current_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
}
| {
None
} | conditional_block |
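// Sketch of driving `detect_scene_changes` from the code above. Only the option
// names, the callback shape, and the function signature come from the original;
// the `u8` pixel type and the caller-provided decoder are assumptions.
fn example_run<R: std::io::Read>(dec: &mut Decoder<R>) -> Vec<usize> {
    let opts = DetectionOptions {
        progress_callback: Some(Box::new(|frames, keyframes| {
            eprintln!("analyzed {} frames, found {} keyframes", frames, keyframes);
        })),
        ..DetectionOptions::default()
    };
    detect_scene_changes::<R, u8>(dec, opts)
}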
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(&current_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) | // If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
}
| {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB ... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut. | identifier_body |
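// Toy model (all names and the i32 scene labels are assumptions) of the flash
// rule described in the comments above: a lookahead window that starts and ends
// in the same scene but differs in between contains only a flash, not a scenecut.
fn is_flash(window: &[i32]) -> bool {
    match (window.first(), window.last()) {
        (Some(first), Some(last)) => first == last && window.iter().any(|s| s != first),
        _ => false,
    }
}
// is_flash(&[0, 1, 1, 0]) == true   (the 1s are shorter than the window)
// is_flash(&[0, 1, 1, 1]) == false  (the change persists: a real scenecut)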
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
/// Runs keyframe detection on frames from the lookahead queue.
struct | <'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(&current_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
}
| SceneChangeDetector | identifier_name |
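// Hypothetical unit test for the SAD helper above; it assumes `u8` implements
// the `Pixel` trait, as the surrounding code already relies on.
#[cfg(test)]
mod sad_example {
    use super::SceneChangeDetector;

    #[test]
    fn sad_of_small_planes() {
        let a: Vec<u8> = vec![10, 20, 30];
        let b: Vec<u8> = vec![12, 18, 30];
        // |10 - 12| + |20 - 18| + |30 - 30| = 4
        assert_eq!(SceneChangeDetector::get_plane_sad(&a, &b), 4);
    }
}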
debugger.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
use log::{debug, error, info, trace, warn}; // assumed import: the original crate may provide these macros elsewhere
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self |
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
| {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
} | identifier_body |
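// Sketch of a minimal `DebugEventHandler` implementation for the trait above.
// The +0x1000 entry offset and the logging are illustrative assumptions; the
// trait shape, `Breakpoints::set`, and the `ModuleImage` accessors come from
// the original code.
struct LoggingHandler;

impl DebugEventHandler for LoggingHandler {
    fn on_breakpoint(&mut self, _dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()> {
        println!("breakpoint hit in tracee {}", tracee.pid);
        Ok(())
    }

    fn on_module_load(
        &mut self,
        db: &mut DebuggerContext,
        tracee: &mut Tracee,
        image: &ModuleImage,
    ) -> Result<()> {
        // Hypothetical: trap a fixed offset into the newly loaded module.
        db.breakpoints.set(tracee, Address(image.base().0 + 0x1000))?;
        Ok(())
    }
}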
debugger.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
use log::{debug, error, info, trace, warn}; // assumed import: the original crate may provide these macros elsewhere
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
|
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
} | #[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip; | random_line_split |
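// The instruction-pointer fixup above in isolation: after the CPU executes the
// one-byte `int3` written by `Breakpoints::set`, the reported PC is one past
// the patched address, so the breakpoint table is keyed by `pc - 1`. The
// helper and its sample value are illustrative only.
fn patched_address(reported_pc: u64) -> Address {
    Address(reported_pc.saturating_sub(1))
}
// patched_address(0x5555_0000_1001) == Address(0x5555_0000_1000)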
debugger.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
use log::{debug, error, info, trace, warn}; // assumed import: the original crate may provide these macros elsewhere
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn | (self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
| wait | identifier_name |
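// Sketch of the save/restore contract of `Breakpoints` above. This would run
// inside an event handler with a live tracee; the call sequence is real, the
// helper itself is an assumption.
fn toggle(bps: &mut Breakpoints, tracee: &mut Tracee, addr: Address) -> Result<()> {
    bps.set(tracee, addr)?; // saves the original byte, writes 0xcc
    let was_set = bps.clear(tracee, addr)?; // restores the saved byte
    assert!(was_set);
    Ok(())
}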
debugger.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
use log::{debug, error, info, trace, warn}; // assumed import: the original crate may provide these macros elsewhere
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
// Consume the final group; the loop body above only flushes a group when
// it sees the start of the next one.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A group of `MemoryMap`s that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
|
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mappings must share a common path");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
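/// Module images that were newly mapped or unmapped between two consecutive
/// snapshots of a process's executable memory mappings.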
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return early if the breakpoint already exists. Otherwise we would read
// back our own `0xcc` below and wrongly save it as the original byte.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
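// 0xcc is the x86 `int3` opcode. This write assumes an x86-64 target;
// an aarch64 port would need to write a `BRK` instruction here instead.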
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
| {
bail!("no executable mapping for module image");
} | conditional_block |
lib.rs | //! [![license:MIT/Apache-2.0][1]](https://github.com/uazu/stakker)
//! [![github:uazu/stakker][2]](https://github.com/uazu/stakker)
//! [![crates.io:stakker][3]](https://crates.io/crates/stakker)
//! [![docs.rs:stakker][4]](https://docs.rs/stakker)
//! [![uazu.github.io:stakker][5]](https://uazu.github.io/stakker/)
//!
//! [1]: https://img.shields.io/badge/license-MIT%2FApache--2.0-blue
//! [2]: https://img.shields.io/badge/github-uazu%2Fstakker-brightgreen
//! [3]: https://img.shields.io/badge/crates.io-stakker-red
//! [4]: https://img.shields.io/badge/docs.rs-stakker-purple
//! [5]: https://img.shields.io/badge/uazu.github.io-stakker-yellow
//!
//! **Stakker** is a lightweight low-level single-threaded actor
//! runtime. It is designed to be layered on top of whatever event
//! source or main loop the user prefers to use. Asynchronous calls
//! are addressed to individual methods within an actor, rather like
//! Pony behaviours. All calls and argument types are known and
//! statically checked at compile-time giving the optimiser a lot of
//! scope. **Stakker** also provides a timer queue for timeouts or
//! delayed calls, a lazy queue to allow batching recent operations,
//! and an idle queue for running a call when nothing else is
//! outstanding.
//!
//! By default **Stakker** uses unsafe code for better time and memory
//! efficiency. However if you prefer to avoid unsafe code, then
//! enable the **no-unsafe** feature which compiles the whole crate
//! with `forbid(unsafe_code)`. Safe alternatives will be used, at
//! some cost in time and memory. There are other features that
//! provide finer-grained control (see below).
//!
//! - [Overview of types](#overview-of-types)
//! - [Efficiency](#efficiency)
//! - [Cargo features](#cargo-features)
//! - [Testing](#testing)
//! - [Tutorial example](#tutorial-example)
//! - [Main loop examples](#main-loop-examples)
//! - [Why the name **Stakker**?](#why-the-name-stakker)
//!
//! See the [Stakker Guide and Design
//! Notes](https://uazu.github.io/stakker/) for additional
//! documentation.
//!
//!
//! # Overview of types
//!
//! [`Actor`] and [`ActorOwn`] are ref-counting references to an
//! actor. Create an actor with [`actor!`] and call it with
//! [`call!`].
//!
//! [`Fwd`] and [`Ret`] forward data to another destination
//! asynchronously, typically to a particular entry-point in a
//! particular actor. So [`Fwd`] and [`Ret`] instances take the role
//! of callback functions. The difference between them is that
//! [`Fwd`] may be called multiple times, is ref-counted for cheap
//! cloning and is based on a `Fn` with `Copy`, whereas [`Ret`] can be
//! used only once, is based on `FnOnce` and is a "move" value. Also
//! the [`Ret`] end-point is informed if the [`Ret`] instance is
//! dropped without sending back a message, for example if a zombie
//! actor is called. See the [`fwd_*!`](#macros) and
//! [`ret_*!`](#macros) macros for creation of instances, and [`fwd!`]
//! and [`ret!`] to make use of them.
//!
//! [`Stakker`] is the external interface to the runtime, i.e. how it
//! is managed from the event loop, or during startup.
//!
//! [`Cx`] is the context passed to all actor methods. It gives
//! access to methods related to the actor being called. It also
//! gives access to [`Core`].
//!
//! [`Core`] is the part of [`Stakker`] which is accessible to actors
//! during actor calls via [`Cx`]. Both [`Stakker`] and [`Cx`]
//! references dereference to [`Core`] and can be used wherever a
//! [`Core`] ref is required.
//!
//! [`Share`] allows a mutable structure to be shared safely between
//! actors, a bit like IPC shared-memory but with guaranteed exclusive
//! access. This may be used for efficiency, like shared-memory
//! buffers are sometimes used between OS processes.
//!
//! [`Deferrer`] allows queuing things to run from `Drop` handlers or
//! from other places in the main thread without access to [`Core`].
//! All actors have a built-in [`Deferrer`] which can be used from
//! outside the actor.
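//!
//! As a brief sketch (not from the original docs; it assumes the
//! [`Deferrer`] was obtained earlier, e.g. from [`Core`]):
//!
//! ```no_run
//!# use stakker::{Deferrer, Stakker};
//! struct Guard(Deferrer);
//! impl Drop for Guard {
//!     fn drop(&mut self) {
//!         // Queue a closure to run soon on the main thread
//!         self.0.defer(|_: &mut Stakker| println!("guard dropped"));
//!     }
//! }
//! ```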
//!
//! For interfacing with other threads, [`PipedThread`] wraps a thread
//! and handles all data transfer to/from it and all cleanup.
//! [`Channel`] allows other threads to send messages to an actor.
//! [`Waker`] is a primitive which allows channels and other data
//! transfer to the main thread to be coordinated. [See here](sync)
//! for more details.
//!
//!
//! # Efficiency
//!
//! A significant aim in the development of **Stakker** was to be
//! lightweight and to minimize overheads in time and memory, and to
//! scale well. Another significant aim was to be "as simple as
//! possible but no simpler", to try to find an optimal set of types
//! and operations that provide the required functionality and
//! ergonomics and that fit the Rust model, to make maximum use of the
//! guarantees that Rust provides.
//!
//! By default **Stakker** uses [`TCell`](https://docs.rs/qcell) or
//! [`TLCell`](https://docs.rs/qcell) for zero-cost protected access
//! to actor state, which also guarantees at compile-time that no
//! actor can directly access any other actor.
//!
//! By default a cut-down ref-counting implementation is used instead
//! of `Rc`, which saves around one `usize` per [`Actor`] or [`Fwd`]
//! instance.
//!
//! With default features, only one thread is allowed to run a
//! [`Stakker`] instance, which enables an optimisation which uses a
//! global variable for the [`Deferrer`] defer queue (used for drop
//! handlers). However if more [`Stakker`] instances need to be run,
//! then the **multi-thread** or **multi-stakker** features cause it
//! to use alternative implementations.
//!
//! All deferred operations, including all async actor calls, are
//! handled as `FnOnce` instances on a queue. The aim is to make this
//! cheap enough so that deferring something doesn't have to be a big
//! decision. Thanks to Rust's inlining, these are efficient -- the
//! compiler might even choose to inline the internal code of the
//! actor call into the `FnOnce`, as that is all known at
//! compile-time.
//!
//! By default the `FnOnce` queue is a flat heterogeneous queue,
//! storing the closures directly in a byte `Vec`, which should give
//! best performance and cache locality at the cost of some unsafe
//! code. However a fully-safe boxed closure queue implementation is
//! also available.
//!
//! Forwarding handlers ([`Fwd`]) are boxed `Fn` instances along with
//! a ref-count. Return handlers ([`Ret`]) are boxed `FnOnce`
//! instances. Both typically queue a `FnOnce` operation when | //!
//! If no inter-thread operations are active, then **Stakker** will
//! never do locking or any atomic operations, nor block for any
//! reason. So the code can execute at full speed without triggering
//! any CPU memory fences or whatever. Usually the only thing that
//! blocks would be the external I/O poller whilst waiting for I/O or
//! timer expiry. When other threads have been started and they defer
//! wake-ups to the main thread, this is handled as an I/O event which
//! causes the wake flags to be checked using atomic operations.
//!
//!
//! # Cargo features
//!
//! Cargo features in **Stakker** do not change **Stakker**'s public
//! API. The API stays the same, but the implementation behind the
//! API changes.
//!
//! Also, cargo features are additive. This means that if one crate
//! using **Stakker** enables a feature, then it is enabled for all
//! uses of **Stakker** in the build. So when features switch between
//! alternative implementations, enabling a feature has to result in
//! the more tolerant implementation, because all users of the crate
//! have to be able to work with this configuration. This usually
//! means that features switch from the most efficient and restrictive
//! implementation, to a less efficient but more flexible one.
//!
//! So using the default features is the best choice unless you have
//! specific requirements. When a crate that uses **Stakker** doesn't
//! care about whether a feature is enabled or not, it should avoid
//! setting it and leave it up to the application to choose.
//!
//! Features enabled by default:
//!
//! - **inter-thread**: Enables inter-thread operations such as
//! [`Waker`] and [`PipedThread`].
//!
//! Optional features:
//!
//! - **no-unsafe-queue**: Disable the fast FnOnce queue implementation,
//! which uses unsafe code. Uses a boxed queue instead.
//!
//! - **no-unsafe**: Disable all unsafe code within this crate, at
//! some cost in time and memory.
//!
//! - **multi-thread**: Specifies that more than one **Stakker** will
//! run in the process, at most one **Stakker** per thread. This
//! disables some optimisations that require process-wide access.
//!
//! - **multi-stakker**: Specifies that more than one **Stakker** may
//! need to run in the same thread. This disables optimisations that
//! require either process-wide or thread-local access.
//!
//! - **inline-deferrer**: Forces use of the inline [`Deferrer`]
//! implementation instead of using the global or thread-local
//! implementation. Possibly useful if thread-locals are very slow.
//!
//! - **logger**: Enables **Stakker**'s core logging feature, which
//! logs actor startup and termination, and which allows macros from
//! the `stakker_log` crate to log with actor context information.
//! See [`Stakker::set_logger`].
//!
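//! For example, an application that wants the fully-safe build could enable
//! the feature in its `Cargo.toml` (a sketch; the version is a placeholder):
//!
//! ```toml
//! [dependencies]
//! stakker = { version = "*", features = ["no-unsafe"] }
//! ```
//!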
//! These are the implementations that are switched, in order of
//! preference, listing most-preferred first:
//!
//! ### Cell type
//!
//! - `TCell`: Best performance, but only allows a single **Stakker**
//! per process
//!
//! - `TLCell`: Best performance, but uses thread-locals at
//! **Stakker** creation time and only allows a single **Stakker** per
//! thread
//!
//! - `QCell`: Allows many **Stakker** instances per thread at some
//! cost in time and memory
//!
//! ### Deferrer
//!
//! - Global deferrer: Uses a global variable to find the [`Deferrer`]
//!
//! - Thread-local deferrer: Uses a thread-local to find the
//! [`Deferrer`], with safe and unsafe variants
//!
//! - Inline deferrer: Keeps references to the [`Deferrer`] in all
//! places where it is needed, with safe and unsafe variants. In
//! particular this adds a `usize` to all actors.
//!
//! ### Actor ref-counting
//!
//! - Packed: Uses a little unsafe code to save a `usize` per actor
//!
//! - Standard: Uses `std::rc::Rc`
//!
//! ### Call queues
//!
//! - Fast `FnOnce` queue: Appends `FnOnce` closures directly to a
//! flat memory buffer. Gives best performance, but uses `unsafe`
//! code.
//!
//! - Boxed queue: Stores closures indirectly by boxing them
//!
//!
//! # Testing
//!
//! **Stakker** has unit and doc tests that give over 90% coverage
//! across all feature combinations. These tests also run cleanly
//! under valgrind and MIRI. In addition there are some fuzz tests
//! and stress tests under `extra/` that further exercise particular
//! components to verify that they operate as expected.
//!
//!
//! # Tutorial example
//!
//! ```
//!# use stakker::{actor, after, call, ret_nop, ret_shutdown, fwd_to, ret, ret_some_to};
//!# use stakker::{Actor, CX, Fwd, Stakker, Ret};
//!# use std::time::{Duration, Instant};
//!#
//! // An actor is represented as a struct which holds the actor state
//! struct Light {
//! start: Instant,
//! on: bool,
//! }
//!
//! impl Light {
//! // This is a "Prep" method which is used to create a Self value
//! // for the actor. `cx` is the actor context and gives access to
//! // Stakker `Core`. (`CX![]` expands to `&mut Cx<'_, Self>`.)
//! // A "Prep" method doesn't have to return a Self value right away.
//! // For example it might asynchronously attempt a connection to a
//! // remote server first before arranging a call to another "Prep"
//! // function which returns the Self value. Once a value is returned,
//! // the actor is "Ready" and any queued-up operations on the actor
//! // will be executed.
//! pub fn init(cx: CX![]) -> Option<Self> {
//! // Use cx.now() instead of Instant::now() to allow execution
//! // in virtual time if supported by the environment.
//! let start = cx.now();
//! Some(Self { start, on: false })
//! }
//!
//! // Methods that may be called once the actor is "Ready" have a
//! // `&mut self` or `&self` first argument.
//! pub fn set(&mut self, cx: CX![], on: bool) {
//! self.on = on;
//! let time = cx.now() - self.start;
//! println!("{:04}.{:03} Light on: {}", time.as_secs(), time.subsec_millis(), on);
//! }
//!
//! // A `Fwd` or `Ret` allows passing data to arbitrary destinations,
//! // like an async callback. Here we use it to return a value.
//! pub fn query(&self, cx: CX![], ret: Ret<bool>) {
//! ret!([ret], self.on);
//! }
//! }
//!
//! // This is another actor that holds a reference to a Light actor.
//! struct Flasher {
//! light: Actor<Light>,
//! interval: Duration,
//! count: usize,
//! }
//!
//! impl Flasher {
//! pub fn init(cx: CX![], light: Actor<Light>,
//! interval: Duration, count: usize) -> Option<Self> {
//! // Defer first switch to the queue
//! call!([cx], switch(true));
//! Some(Self { light, interval, count })
//! }
//!
//! pub fn switch(&mut self, cx: CX![], on: bool) {
//! // Change the light state
//! call!([self.light], set(on));
//!
//! self.count -= 1;
//! if self.count != 0 {
//! // Call switch again after a delay
//! after!(self.interval, [cx], switch(!on));
//! } else {
//! // Terminate the actor successfully, causing StopCause handler to run
//! cx.stop();
//! }
//!
//! // Query the light state, receiving the response in the method
//! // `recv_state`, which has both fixed and forwarded arguments.
//! let ret = ret_some_to!([cx], recv_state(self.count) as (bool));
//! call!([self.light], query(ret));
//! }
//!
//! fn recv_state(&self, _: CX![], count: usize, state: bool) {
//! println!(" (at count {} received: {})", count, state);
//! }
//! }
//!
//! let mut stakker0 = Stakker::new(Instant::now());
//! let stakker = &mut stakker0;
//!
//! // Create and initialise the Light and Flasher actors. The
//! // Flasher actor is given a reference to the Light. Use a
//! // StopCause handler to shutdown when the Flasher terminates.
//! let light = actor!(stakker, Light::init(), ret_nop!());
//! let _flasher = actor!(
//! stakker,
//! Flasher::init(light.clone(), Duration::from_secs(1), 6),
//! ret_shutdown!(stakker)
//! );
//!
//! // Since we're not in virtual time, we use `Instant::now()` in
//! // this loop, which is then passed on to all the actors as
//! // `cx.now()`. (If you want to run time faster or slower you
//! // could use another source of time.) So all calls in a batch of
//! // processing get the same `cx.now()` value. Also note that
//! // `Instant::now()` uses a Mutex on some platforms so it saves
//! // cycles to call it less often.
//! stakker.run(Instant::now(), false);
//!# if false {
//! while stakker.not_shutdown() {
//! // Wait for next timer to expire. Here there's no I/O polling
//! // required to wait for external events, so just `sleep`
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), false);
//! std::thread::sleep(maxdur);
//!
//! // Run queue and timers
//! stakker.run(Instant::now(), false);
//! }
//!# } else { // Use virtual time version when testing
//!# let mut now = Instant::now();
//!# while stakker.not_shutdown() {
//!# now += stakker.next_wait_max(now, Duration::from_secs(60), false);
//!# stakker.run(now, false);
//!# }
//!# }
//! ```
//!
//!
//! # Main loop examples
//!
//! Note that the 60s duration used below just means that the process
//! will wake every 60s if nothing else is going on. You could make
//! this a larger value.
//!
//! ### Virtual time main loop, no I/O, no idle queue handling
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# fn test(stakker: &mut Stakker) {
//! let mut now = Instant::now();
//! stakker.run(now, false);
//! while stakker.not_shutdown() {
//! now += stakker.next_wait_max(now, Duration::from_secs(60), false);
//! stakker.run(now, false);
//! }
//!# }
//! ```
//!
//! ### Real time main loop, no I/O, no idle queue handling
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# fn test(stakker: &mut Stakker) {
//! stakker.run(Instant::now(), false);
//! while stakker.not_shutdown() {
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), false);
//! std::thread::sleep(maxdur);
//! stakker.run(Instant::now(), false);
//! }
//!# }
//! ```
//!
//! ### Real time I/O poller main loop, with idle queue handling
//!
//! This example uses `MioPoll` from the `stakker_mio` crate.
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# struct MioPoll;
//!# impl MioPoll { fn poll(&self, d: Duration) -> std::io::Result<bool> { Ok(false) } }
//!# fn test(stakker: &mut Stakker, miopoll: &mut MioPoll) -> std::io::Result<()> {
//! let mut idle_pending = stakker.run(Instant::now(), false);
//! while stakker.not_shutdown() {
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), idle_pending);
//! let activity = miopoll.poll(maxdur)?;
//! idle_pending = stakker.run(Instant::now(), !activity);
//! }
//!# Ok(())
//!# }
//! ```
//!
//! The way this works is that if there are idle queue items pending,
//! then `next_wait_max` returns 0s, which means that the `poll` call
//! only checks for new I/O events without blocking. If there are no
//! new events (`activity` is false), then an item from the idle queue
//! is run.
//!
//!
//! # Why the name **Stakker**?
//!
//! "Single-threaded actor runtime" → STACR → **Stakker**.
//! The name is also a small tribute to the 1988 Humanoid track
//! "Stakker Humanoid", which borrows samples from the early video
//! game **Berzerk**, and which rolls along quite economically as I
//! hope the **Stakker** runtime also does.
//!
//! [`ActorOwn`]: struct.ActorOwn.html
//! [`Actor`]: struct.Actor.html
//! [`Channel`]: sync/struct.Channel.html
//! [`Core`]: struct.Core.html
//! [`Cx`]: struct.Cx.html
//! [`Deferrer`]: struct.Deferrer.html
//! [`Fwd`]: struct.Fwd.html
//! [`PipedThread`]: sync/struct.PipedThread.html
//! [`Ret`]: struct.Ret.html
//! [`Share`]: struct.Share.html
//! [`Stakker::set_logger`]: struct.Stakker.html#method.set_logger
//! [`Stakker`]: struct.Stakker.html
//! [`Waker`]: sync/struct.Waker.html
//! [`actor!`]: macro.actor.html
//! [`call!`]: macro.call.html
//! [`fwd!`]: macro.fwd.html
//! [`ret!`]: macro.ret.html
// Insist on 2018 style
#![deny(rust_2018_idioms)]
// No unsafe code is allowed anywhere if no-unsafe is set
#![cfg_attr(feature = "no-unsafe", forbid(unsafe_code))]
// To fix these would break the API
#![allow(clippy::upper_case_acronyms)]
// TODO: Illustrate Fwd in the tutorial example, e.g. make println!
// output go via a Fwd
pub use crate::core::{Core, Stakker};
pub use crate::log::{LogFilter, LogID, LogLevel, LogLevelError, LogRecord, LogVisitor};
pub use actor::{Actor, ActorOwn, ActorOwnAnon, ActorOwnSlab, Cx, StopCause};
pub use deferrer::Deferrer;
pub use fwd::Fwd;
pub use ret::Ret;
pub use share::{Share, ShareWeak};
pub use timers::{FixedTimerKey, MaxTimerKey, MinTimerKey};
// These are for backwards-compatibility. They allow the types to
// still be accessed at the top level of the crate, but hides this in
// the online docs. Not hiding it in the locally-generated docs
// allows semver-checks to pass.
#[cfg_attr(docsrs, doc(hidden))]
pub use sync::{PipedLink, PipedThread, Waker};
/// Auxiliary types that are not interesting in themselves
pub mod aux {
pub use crate::actor::ActorOwnSlabIter;
}
// Trait checks
static_assertions::assert_not_impl_any!(Stakker: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Core: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Cx<'_, u8>: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Ret<u8>: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Actor<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(task::Task: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Deferrer: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Share<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Fwd<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Waker: Copy, Clone);
static_assertions::assert_impl_all!(Actor<u8>: Clone);
static_assertions::assert_impl_all!(Deferrer: Clone);
static_assertions::assert_impl_all!(Share<u8>: Clone);
static_assertions::assert_impl_all!(Fwd<u8>: Clone);
static_assertions::assert_impl_all!(Waker: Send, Sync);
static_assertions::assert_impl_all!(FixedTimerKey: Copy, Clone);
static_assertions::assert_impl_all!(MaxTimerKey: Copy, Clone);
static_assertions::assert_impl_all!(MinTimerKey: Copy, Clone);
mod actor;
mod core;
mod fwd;
mod log;
mod macros;
mod ret;
mod share;
pub mod sync;
pub mod task;
mod timers;
#[cfg(test)]
mod test;
// Ref-counting selections
#[cfg(not(feature = "no-unsafe"))]
mod rc {
pub(crate) mod count;
pub(crate) mod minrc;
pub(crate) mod actorrc_packed;
pub(crate) use actorrc_packed::ActorRc;
pub(crate) mod fwdrc_min;
pub(crate) use fwdrc_min::FwdRc;
}
#[cfg(feature = "no-unsafe")]
mod rc {
pub(crate) mod count;
pub(crate) mod actorrc_std;
pub(crate) use actorrc_std::ActorRc;
pub(crate) mod fwdrc_std;
pub(crate) use fwdrc_std::FwdRc;
}
// Deferrer selection
#[cfg(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
not(feature = "multi-thread"),
not(feature = "no-unsafe")
))]
mod deferrer {
mod api;
pub use api::Deferrer;
mod global;
use global::DeferrerAux;
}
#[cfg(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
feature = "multi-thread"
))]
mod deferrer {
mod api;
pub use api::Deferrer;
#[cfg(feature = "no-unsafe")]
mod thread_local_safe;
#[cfg(feature = "no-unsafe")]
use thread_local_safe::DeferrerAux;
#[cfg(not(feature = "no-unsafe"))]
mod thread_local;
#[cfg(not(feature = "no-unsafe"))]
use thread_local::DeferrerAux;
}
// Inline deferrer used if neither of the other options fits. Clearer
// to not simplify this boolean expression, because the subexpressions
// should match the expressions above.
#[cfg(all(
not(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
not(feature = "multi-thread"),
not(feature = "no-unsafe")
)),
not(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
feature = "multi-thread",
)),
))]
mod deferrer {
mod api;
pub use api::Deferrer;
#[cfg(feature = "no-unsafe")]
mod inline_safe;
#[cfg(feature = "no-unsafe")]
use inline_safe::DeferrerAux;
#[cfg(not(feature = "no-unsafe"))]
mod inline;
#[cfg(not(feature = "no-unsafe"))]
use inline::DeferrerAux;
}
// FnOnceQueue selection
#[cfg(not(any(feature = "no-unsafe", feature = "no-unsafe-queue")))]
mod queue {
mod flat;
pub(crate) use flat::FnOnceQueue;
}
#[cfg(any(feature = "no-unsafe", feature = "no-unsafe-queue"))]
mod queue {
mod boxed;
pub(crate) use boxed::FnOnceQueue;
}
// Cell selection
#[cfg(all(not(feature = "multi-stakker"), not(feature = "multi-thread")))]
mod cell {
pub(crate) mod tcell;
pub(crate) use tcell as cell;
}
#[cfg(all(not(feature = "multi-stakker"), feature = "multi-thread"))]
mod cell {
pub(crate) mod tlcell;
pub(crate) use tlcell as cell;
}
#[cfg(feature = "multi-stakker")]
mod cell {
pub(crate) mod qcell;
pub(crate) use self::qcell as cell;
} | //! provided with arguments. These are also efficient due to
//! inlining. In this case two chunks of inlined code are generated
//! for each by the compiler: the first which accepts arguments and
//! pushes the second one onto the queue. | random_line_split |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
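///
/// As an illustrative sketch (not actual compiler output), a two-case match
/// lowers to roughly this LLVM shape:
///
/// ```text
/// entry:
///   %tag = extractvalue %matched, 0
///   switch i8 %tag, label %match_all [ i8 0, label %match_branch ]
/// match_branch:
///   ; bind pattern fields, then codegen the branch body
///   br label %match_end
/// match_all:
///   br label %match_end
/// match_end:
///   %match_result = phi i32 [ %a, %match_branch ], [ %b, %match_all ]
/// ```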
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8, ... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
/// Get the tag value that identifies which constructor this is.
fn get_constructor_tag<'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag), .. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
/// Cast the given value to the given tagged-union variant. Returns None if
/// the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value, | cases.last().unwrap().tag == None
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen its branch code here. Otherwise,
// if this constructor has no catch-all, the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
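///
/// For example (illustrative, not from the original source): if the cases
/// `Some(x)` and `Some(y)` were merged during completeness checking, the
/// merged Case has a single field whose inner Vec holds the ids of both
/// `x` and `y`, so both names get bound to the same extracted value.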
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
}
/// Performs the union downcast, binding each field of the downcasted variant
/// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
let variant = self.cast_to_variant_type(matched_value, case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the n+1 value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
} | }
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool { | random_line_split |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
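///
/// As an illustrative sketch (not actual compiler output), a two-case match
/// lowers to roughly this LLVM shape:
///
/// ```text
/// entry:
///   %tag = extractvalue %matched, 0
///   switch i8 %tag, label %match_all [ i8 0, label %match_branch ]
/// match_branch:
///   ; bind pattern fields, then codegen the branch body
///   br label %match_end
/// match_all:
///   br label %match_end
/// match_end:
///   %match_result = phi i32 [ %a, %match_branch ], [ %b, %match_all ]
/// ```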
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8, ... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
/// Get the tag value that identifies which constructor this is.
fn | <'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag), .. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
/// Cast the given value to the given tagged-union variant. Returns None if
/// the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value,
}
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool {
cases.last().unwrap().tag == None
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen its branch code here. Otherwise,
// if this constructor has no catch-all, the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
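///
/// For example (illustrative, not from the original source): if the cases
/// `Some(x)` and `Some(y)` were merged during completeness checking, the
/// merged Case has a single field whose inner Vec holds the ids of both
/// `x` and `y`, so both names get bound to the same extracted value.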
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
}
/// Performs the union downcast, binding each field of the downcasted variant
/// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
let variant = self.cast_to_variant_type(matched_value, case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the n+1 value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
}
| get_constructor_tag | identifier_name |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
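///
/// As an illustrative sketch (not actual compiler output), a two-case match
/// lowers to roughly this LLVM shape:
///
/// ```text
/// entry:
///   %tag = extractvalue %matched, 0
///   switch i8 %tag, label %match_all [ i8 0, label %match_branch ]
/// match_branch:
///   ; bind pattern fields, then codegen the branch body
///   br label %match_end
/// match_all:
///   br label %match_end
/// match_end:
///   %match_result = phi i32 [ %a, %match_branch ], [ %b, %match_all ]
/// ```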
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8,... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
/// Get the tag value that identifies which constructor this is.
fn get_constructor_tag<'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag),.. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
/// Cast the given value to the given tagged-union variant. Returns None if
/// the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value,
}
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool {
cases.last().unwrap().tag.is_none()
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen the code there. Otherwise if this
// constructor has no catchall the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
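/// For example, two cases `A x` and `A y` merged into one carry
/// `fields == vec![vec![x_id, y_id]]`, binding both ids to the same
/// extracted field (illustrative names).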
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) |
/// Performs the union downcast, binding each field of the downcasted variant
/// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
let variant = self.cast_to_variant_type(matched_value, &case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the (n+1)th value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
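// For example, for a user-defined constructor `Some x` the matched value
// is the struct (tag, x), so pattern field 0 is extracted from index 1
// (offset 1 + field_no 0) below.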
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
}
| {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
} | identifier_body |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
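// As a small illustration of the delete keys: the 1-deletes of "word"
// are "ord", "wrd", "wod" and "wor". A hypothetical helper mirroring
// `generate_permutations` below:
#[allow(dead_code)]
fn one_deletes(word: &str) -> Vec<String> {
    (0..word.len())
        .map(|i| {
            let mut chars: Vec<char> = word.chars().collect();
            chars.remove(i); // drop the i-th character
            chars.into_iter().collect()
        })
        .collect()
}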
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found,
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine a probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) | new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate permutations of this word
for i in 0..word.len() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // Vec<String>
} else {
self.error_map.clone() // Vec<String>
};
for i in error_map.keys() {
if i.len() > 2 {
for u in 0..i.len() {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// regular functions don't capture parent scope.
// closures do catch parent scope
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key | identifier_body |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found,
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
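// A sketch of the lookup order described above: an exact word_map hit
// wins; otherwise error_map maps a delete-key back to its candidates.
// (Hypothetical helper; the real logic lives in `Dictionary::check`.)
#[allow(dead_code)]
fn lookup_order_sketch(d: &Dictionary, input: &str) -> Option<String> {
    if let Some(w) = d.word_map.get(input) {
        Some(w.word.clone()) // exact dictionary hit
    } else {
        // fall back to the permutation table (first candidate only here)
        d.error_map.get(input).map(|c| c[0].clone())
    }
}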
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine a probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate permutations of this word
for i in 0..word.len() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // Vec<String>
} else {
self.error_map.clone() // Vec<String>
};
for i in error_map.keys() {
if i.len() > 2 {
for u in 0..i.len() {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// regular functions don't capture parent scope.
// closures do catch parent scope
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? "); | match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
} | io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else { | random_line_split |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found,
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine a probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
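// The score decides between competing corrections: the candidate with the
// highest score wins. A sketch of `find_best_match` below, written with
// iterator adapters (hypothetical helper):
#[allow(dead_code)]
fn best_of(candidates: &[Word]) -> Option<&Word> {
    // e.g. Bell (score 32) beats Belly (score 29)
    candidates.iter().max_by_key(|w| w.score)
}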
impl Dictionary {
fn | () -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate permutations of this word
for i in 0..word.len() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // Vec<String>
} else {
self.error_map.clone() // Vec<String>
};
for i in error_map.keys() {
if i.len() > 2 {
for u in 0..i.len() {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// regular functions don't capture parent scope.
// closures do catch parent scope
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| new | identifier_name |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found,
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine a probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate permutations of this word
for i in 0..word.len() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // Vec<String>
} else {
self.error_map.clone() // Vec<String>
};
for i in error_map.keys() {
if i.len() > 2 {
for u in 0..i.len() {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// regular functions don't capture parent scope.
// closures do catch parent scope
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) | else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
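// With error_distance == 2, `permutations_of` returns the word itself plus
// every 1- and 2-delete variant. A sketch of the expected output
// (hypothetical assertions):
#[allow(dead_code)]
fn permutations_sketch() {
    let d = Dictionary::new();
    let perms = d.permutations_of("word");
    assert!(perms.contains(&"word".to_string())); // distance 0
    assert!(perms.contains(&"wrd".to_string())); // distance 1: 'o' dropped
    assert!(perms.contains(&"wd".to_string())); // distance 2: 'o' and 'r' dropped
}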
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| {
Some(x.word.clone())
} | conditional_block |
consumer.rs | use super::{
delegate::DelegateMut,
observer::{DelegateObserver, Observer},
utils::modulus,
};
use crate::utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice};
use core::{iter::Chain, mem::MaybeUninit, ptr, slice};
#[cfg(feature = "std")]
use std::io::{self, Write};
/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode could be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub trait Consumer: Observer {
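/// Sets the new `read` index value directly.
///
/// # Safety
///
/// Must not be called concurrently, and all items skipped over by the new
/// index must already have been moved out or dropped (see
/// [`Self::advance_read_index`]).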
unsafe fn set_read_index(&self, value: usize);
/// Moves `read` pointer by `count` places forward.
///
/// # Safety
///
/// First `count` items in occupied memory must be moved out or dropped.
///
/// Must not be called concurrently.
unsafe fn advance_read_index(&self, count: usize) {
self.set_read_index((self.read_index() + count) % modulus(self));
}
/// Provides a direct access to the ring buffer occupied memory.
/// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
///
/// Returns a pair of slices of stored items, the second one may be empty.
/// Elements with lower indices in a slice are older. The first slice contains older items than the second one.
///
/// # Safety
///
/// All items are initialized. Elements must be removed starting from the beginning of first slice.
/// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
///
/// *This method must be followed by a [`Self::advance_read_index`] call with the number of items removed as its argument.*
/// *No other mutating calls allowed before that.*
fn occupied_slices(&self) -> (&[MaybeUninit<Self::Item>], &[MaybeUninit<Self::Item>]) {
let (first, second) = unsafe { self.unsafe_slices(self.read_index(), self.write_index()) };
(first as &_, second as &_)
}
/// Provides a direct mutable access to the ring buffer occupied memory.
///
/// Same as [`Self::occupied_slices`].
///
/// # Safety
///
/// When some item is replaced with uninitialized value then it must not be read anymore.
unsafe fn occupied_slices_mut(&mut self) -> (&mut [MaybeUninit<Self::Item>], &mut [MaybeUninit<Self::Item>]) {
self.unsafe_slices(self.read_index(), self.write_index())
}
/// Returns a pair of slices which contain, in order, the contents of the ring buffer.
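///
/// A minimal usage sketch (mirroring the [`Self::skip`] example below and
/// assuming the producer side's `push_iter`):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 4>>::default();
/// rb.push_iter(0..3);
/// let (left, right) = rb.as_slices();
/// assert_eq!([left, right].concat(), vec![0, 1, 2]);
/// # }
/// ```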
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices();
(slice_assume_init_ref(left), slice_assume_init_ref(right))
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices_mut();
(slice_assume_init_mut(left), slice_assume_init_mut(right))
}
}
/// Removes the eldest item from the ring buffer and returns it.
///
/// Returns `None` if the ring buffer is empty.
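///
/// A minimal usage sketch (assuming the producer side's `push_iter`):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 2>>::default();
/// assert_eq!(rb.try_pop(), None);
/// rb.push_iter(123..124);
/// assert_eq!(rb.try_pop(), Some(123));
/// # }
/// ```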
fn try_pop(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let elem = unsafe { self.occupied_slices().0.get_unchecked(0).assume_init_read() };
unsafe { self.advance_read_index(1) };
Some(elem)
} else {
None
}
}
/// Removes items from the ring buffer and writes them into a slice.
///
/// Returns the number of items removed.
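///
/// A minimal usage sketch (assuming the producer side's `push_iter`):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<u8, 8>>::default();
/// rb.push_iter(1..4);
/// let mut buf = [0u8; 8];
/// assert_eq!(rb.pop_slice(&mut buf), 3);
/// assert_eq!(buf[..3], [1, 2, 3]);
/// # }
/// ```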
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
let (left, right) = self.occupied_slices();
let count = if elems.len() < left.len() {
unsafe { write_uninit_slice(elems, left.get_unchecked(..elems.len())) };
elems.len()
} else {
let (left_elems, elems) = elems.split_at_mut(left.len());
unsafe { write_uninit_slice(left_elems, left) };
left.len()
+ if elems.len() < right.len() {
unsafe { write_uninit_slice(elems, right.get_unchecked(..elems.len())) };
elems.len()
} else {
unsafe { write_uninit_slice(elems.get_unchecked_mut(..right.len()), right) };
right.len()
}
};
unsafe { self.advance_read_index(count) };
count
}
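/// Converts the consumer into an owning iterator that removes items one by one.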
fn into_iter(self) -> IntoIter<Self> {
IntoIter::new(self)
}
/// Returns an iterator that removes items one by one from the ring buffer.
fn pop_iter(&mut self) -> PopIter<'_, Self> {
PopIter::new(self)
}
/// Returns a front-to-back iterator containing references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter(&self) -> Iter<'_, Self> {
let (left, right) = self.as_slices();
left.iter().chain(right.iter())
}
/// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter_mut(&mut self) -> IterMut<'_, Self> {
let (left, right) = self.as_mut_slices();
left.iter_mut().chain(right.iter_mut())
}
/// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
///
/// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
///
/// Returns the number of deleted items.
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
///
/// assert_eq!(rb.push_iter(0..8), 8);
///
/// assert_eq!(rb.skip(4), 4);
/// assert_eq!(rb.skip(8), 4);
/// assert_eq!(rb.skip(4), 0);
/// # }
/// ```
fn skip(&mut self, count: usize) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let actual_count = usize::min(count, left.len() + right.len());
self.advance_read_index(actual_count);
actual_count
}
}
/// Removes all items from the buffer and safely drops them.
///
/// Returns the number of deleted items.
fn clear(&mut self) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let count = left.len() + right.len();
self.advance_read_index(count);
count
}
}
#[cfg(feature = "std")]
/// Removes at most first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
/// If `count` is `None` then as many bytes as possible will be written.
///
/// Returns `Ok(n)` if `write` succeeded. `n` is number of bytes been written.
/// `n == 0` means that either `write` returned zero or ring buffer is empty.
///
/// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
/// To achieve this we write only one contiguous slice at once, so this call may write fewer than `count` bytes even if the writer is ready to accept more.
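///
/// A minimal usage sketch (assuming the producer side's `push_iter`; `Vec<u8>` implements [`Write`]):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<u8, 8>>::default();
/// rb.push_iter(1..4);
/// let mut out: Vec<u8> = Vec::new();
/// assert_eq!(rb.write_into(&mut out, None).unwrap(), 3);
/// assert_eq!(out, vec![1, 2, 3]);
/// # }
/// ```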
fn write_into<S: Write>(&mut self, writer: &mut S, count: Option<usize>) -> io::Result<usize>
where
Self: Consumer<Item = u8>,
{
let (left, _) = self.occupied_slices();
let count = usize::min(count.unwrap_or(left.len()), left.len());
let left_init = unsafe { slice_assume_init_ref(&left[..count]) };
let write_count = writer.write(left_init)?;
assert!(write_count <= count);
unsafe { self.advance_read_index(write_count) };
Ok(write_count)
}
}
pub struct IntoIter<C: Consumer>(C);
impl<C: Consumer> IntoIter<C> {
pub fn new(inner: C) -> Self {
Self(inner)
}
pub fn into_inner(self) -> C {
self.0
}
}
impl<C: Consumer> Iterator for IntoIter<C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.try_pop()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.occupied_len(), None)
}
}
/// An iterator that removes items from the ring buffer.
pub struct | <'a, C: Consumer> {
target: &'a C,
slices: (&'a [MaybeUninit<C::Item>], &'a [MaybeUninit<C::Item>]),
len: usize,
}
impl<'a, C: Consumer> PopIter<'a, C> {
pub fn new(target: &'a mut C) -> Self {
let slices = target.occupied_slices();
Self {
len: slices.0.len() + slices.1.len(),
slices,
target,
}
}
}
impl<'a, C: Consumer> Iterator for PopIter<'a, C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.slices.0.len() {
0 => None,
n => {
let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
if n == 1 {
(self.slices.0, self.slices.1) = (self.slices.1, &[]);
} else {
self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
}
Some(item)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
impl<'a, C: Consumer> ExactSizeIterator for PopIter<'a, C> {
fn len(&self) -> usize {
self.slices.0.len() + self.slices.1.len()
}
}
impl<'a, C: Consumer> Drop for PopIter<'a, C> {
fn drop(&mut self) {
unsafe { self.target.advance_read_index(self.len - self.len()) };
}
}
/// Iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type Iter<'a, C: Consumer> = Chain<slice::Iter<'a, C::Item>, slice::Iter<'a, C::Item>>;
/// Mutable iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type IterMut<'a, C: Consumer> = Chain<slice::IterMut<'a, C::Item>, slice::IterMut<'a, C::Item>>;
#[macro_export]
macro_rules! impl_consumer_traits {
($type:ident $(< $( $param:tt $( : $first_bound:tt $(+ $next_bound:tt )* )? ),+ >)?) => {
#[cfg(feature = "std")]
impl $(< $( $param $( : $first_bound $(+ $next_bound )* )? ),+ >)? std::io::Read for $type $(< $( $param ),+ >)?
where
Self: $crate::traits::Consumer<Item = u8>,
{
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
use $crate::consumer::Consumer;
let n = self.pop_slice(buffer);
if n == 0 && !buffer.is_empty() {
Err(std::io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
}
};
}
pub trait DelegateConsumer: DelegateObserver + DelegateMut
where
Self::Base: Consumer,
{
}
impl<D: DelegateConsumer> Consumer for D
where
D::Base: Consumer,
{
#[inline]
unsafe fn set_read_index(&self, value: usize) {
self.base().set_read_index(value)
}
#[inline]
unsafe fn advance_read_index(&self, count: usize) {
self.base().advance_read_index(count)
}
#[inline]
fn occupied_slices(&self) -> (&[core::mem::MaybeUninit<Self::Item>], &[core::mem::MaybeUninit<Self::Item>]) {
self.base().occupied_slices()
}
#[inline]
unsafe fn occupied_slices_mut(&mut self) -> (&mut [core::mem::MaybeUninit<Self::Item>], &mut [core::mem::MaybeUninit<Self::Item>]) {
self.base_mut().occupied_slices_mut()
}
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
self.base().as_slices()
}
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
self.base_mut().as_mut_slices()
}
#[inline]
fn try_pop(&mut self) -> Option<Self::Item> {
self.base_mut().try_pop()
}
#[inline]
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
self.base_mut().pop_slice(elems)
}
#[inline]
fn iter(&self) -> Iter<'_, Self> {
self.base().iter()
}
#[inline]
fn iter_mut(&mut self) -> IterMut<'_, Self> {
self.base_mut().iter_mut()
}
#[inline]
fn skip(&mut self, count: usize) -> usize {
self.base_mut().skip(count)
}
#[inline]
fn clear(&mut self) -> usize {
self.base_mut().clear()
}
}
| PopIter | identifier_name |
consumer.rs | use super::{
delegate::DelegateMut,
observer::{DelegateObserver, Observer},
utils::modulus,
};
use crate::utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice};
use core::{iter::Chain, mem::MaybeUninit, ptr, slice};
#[cfg(feature = "std")]
use std::io::{self, Write};
/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode could be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub trait Consumer: Observer {
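/// Sets the new `read` index value directly.
///
/// # Safety
///
/// Must not be called concurrently, and all items skipped over by the new
/// index must already have been moved out or dropped (see
/// [`Self::advance_read_index`]).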
unsafe fn set_read_index(&self, value: usize);
/// Moves `read` pointer by `count` places forward.
///
/// # Safety
///
/// First `count` items in occupied memory must be moved out or dropped.
///
/// Must not be called concurrently.
unsafe fn advance_read_index(&self, count: usize) {
self.set_read_index((self.read_index() + count) % modulus(self));
}
/// Provides a direct access to the ring buffer occupied memory.
/// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
///
/// Returns a pair of slices of stored items, the second one may be empty.
/// Elements with lower indices in a slice are older. The first slice contains older items than the second one.
///
/// # Safety
///
/// All items are initialized. Elements must be removed starting from the beginning of first slice.
/// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
///
/// *This method must be followed by a [`Self::advance_read_index`] call with the number of items removed as its argument.*
/// *No other mutating calls allowed before that.*
fn occupied_slices(&self) -> (&[MaybeUninit<Self::Item>], &[MaybeUninit<Self::Item>]) {
let (first, second) = unsafe { self.unsafe_slices(self.read_index(), self.write_index()) };
(first as &_, second as &_)
}
/// Provides a direct mutable access to the ring buffer occupied memory.
///
/// Same as [`Self::occupied_slices`].
///
/// # Safety
///
/// When some item is replaced with uninitialized value then it must not be read anymore.
unsafe fn occupied_slices_mut(&mut self) -> (&mut [MaybeUninit<Self::Item>], &mut [MaybeUninit<Self::Item>]) {
self.unsafe_slices(self.read_index(), self.write_index())
}
/// Returns a pair of slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices();
(slice_assume_init_ref(left), slice_assume_init_ref(right))
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices_mut();
(slice_assume_init_mut(left), slice_assume_init_mut(right))
}
}
/// Removes the eldest item from the ring buffer and returns it.
///
/// Returns `None` if the ring buffer is empty.
fn try_pop(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let elem = unsafe { self.occupied_slices().0.get_unchecked(0).assume_init_read() };
unsafe { self.advance_read_index(1) };
Some(elem)
} else {
None
}
}
/// Removes items from the ring buffer and writes them into a slice.
///
    /// Returns the number of items removed.
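    ///
    /// A doc-test sketch (types as in the `skip` example below):
    ///
    /// ```
    /// # use ringbuf::{LocalRb, storage::Static, traits::*};
    /// let mut rb = LocalRb::<Static<u8, 8>>::default();
    /// assert_eq!(rb.push_iter(1..5), 4);
    /// let mut buf = [0u8; 8];
    /// assert_eq!(rb.pop_slice(&mut buf), 4);
    /// assert_eq!(&buf[..4], &[1, 2, 3, 4]);
    /// ```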
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
let (left, right) = self.occupied_slices();
let count = if elems.len() < left.len() {
unsafe { write_uninit_slice(elems, left.get_unchecked(..elems.len())) };
elems.len()
} else {
let (left_elems, elems) = elems.split_at_mut(left.len());
unsafe { write_uninit_slice(left_elems, left) };
left.len()
+ if elems.len() < right.len() {
unsafe { write_uninit_slice(elems, right.get_unchecked(..elems.len())) };
elems.len()
} else {
unsafe { write_uninit_slice(elems.get_unchecked_mut(..right.len()), right) };
right.len()
}
};
unsafe { self.advance_read_index(count) };
count
}
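    /// Converts the consumer into an iterator that removes items one by one.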
    fn into_iter(self) -> IntoIter<Self>
    where
        Self: Sized,
    {
IntoIter::new(self)
}
/// Returns an iterator that removes items one by one from the ring buffer.
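    ///
    /// A doc-test sketch (types as in the `skip` example below):
    ///
    /// ```
    /// # use ringbuf::{LocalRb, storage::Static, traits::*};
    /// let mut rb = LocalRb::<Static<i32, 4>>::default();
    /// assert_eq!(rb.push_iter(0..3), 3);
    /// let drained: Vec<i32> = rb.pop_iter().collect();
    /// assert_eq!(drained, vec![0, 1, 2]);
    /// assert!(rb.is_empty());
    /// ```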
fn pop_iter(&mut self) -> PopIter<'_, Self> {
PopIter::new(self)
}
/// Returns a front-to-back iterator containing references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter(&self) -> Iter<'_, Self> {
let (left, right) = self.as_slices();
left.iter().chain(right.iter())
}
/// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter_mut(&mut self) -> IterMut<'_, Self> {
let (left, right) = self.as_mut_slices();
left.iter_mut().chain(right.iter_mut())
}
/// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
///
    /// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
///
/// Returns the number of deleted items.
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
///
/// assert_eq!(rb.push_iter(0..8), 8);
///
/// assert_eq!(rb.skip(4), 4);
/// assert_eq!(rb.skip(8), 4);
/// assert_eq!(rb.skip(4), 0);
/// # }
/// ```
fn skip(&mut self, count: usize) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let actual_count = usize::min(count, left.len() + right.len());
self.advance_read_index(actual_count);
actual_count
}
}
/// Removes all items from the buffer and safely drops them.
///
/// Returns the number of deleted items.
fn clear(&mut self) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let count = left.len() + right.len();
self.advance_read_index(count);
count
}
}
#[cfg(feature = "std")]
    /// Removes at most the first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
    /// If `count` is `None` then as many bytes as possible will be written.
    ///
    /// Returns `Ok(n)` if `write` succeeded, where `n` is the number of bytes written.
    /// `n == 0` means that either `write` returned zero or the ring buffer is empty.
    ///
    /// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
    /// To achieve this we write only one contiguous slice at a time, so this call may write fewer than `len` items even if the writer is ready to accept more.
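    ///
    /// A doc-test sketch using a `Vec<u8>` as the writer (any [`Write`] implementor works):
    ///
    /// ```
    /// # use ringbuf::{LocalRb, storage::Static, traits::*};
    /// let mut rb = LocalRb::<Static<u8, 8>>::default();
    /// assert_eq!(rb.push_iter(b"abc".iter().copied()), 3);
    /// let mut out = Vec::new();
    /// assert_eq!(rb.write_into(&mut out, None).unwrap(), 3);
    /// assert_eq!(out, b"abc");
    /// ```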
fn write_into<S: Write>(&mut self, writer: &mut S, count: Option<usize>) -> io::Result<usize>
where
Self: Consumer<Item = u8>,
{
let (left, _) = self.occupied_slices();
let count = usize::min(count.unwrap_or(left.len()), left.len());
let left_init = unsafe { slice_assume_init_ref(&left[..count]) };
let write_count = writer.write(left_init)?;
assert!(write_count <= count);
unsafe { self.advance_read_index(write_count) };
Ok(write_count)
}
}
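/// An iterator that takes ownership of a consumer and removes items from the ring buffer one by one.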
pub struct IntoIter<C: Consumer>(C);
impl<C: Consumer> IntoIter<C> {
pub fn new(inner: C) -> Self {
Self(inner)
}
pub fn into_inner(self) -> C {
self.0
}
}
impl<C: Consumer> Iterator for IntoIter<C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.try_pop()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.occupied_len(), None)
}
}
/// An iterator that removes items from the ring buffer.
pub struct PopIter<'a, C: Consumer> {
target: &'a C,
slices: (&'a [MaybeUninit<C::Item>], &'a [MaybeUninit<C::Item>]),
len: usize,
}
impl<'a, C: Consumer> PopIter<'a, C> {
pub fn new(target: &'a mut C) -> Self {
let slices = target.occupied_slices();
Self {
len: slices.0.len() + slices.1.len(),
slices,
target,
}
}
}
impl<'a, C: Consumer> Iterator for PopIter<'a, C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.slices.0.len() {
0 => None,
n => {
let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
if n == 1 {
(self.slices.0, self.slices.1) = (self.slices.1, &[]);
} else {
self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
}
Some(item)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
impl<'a, C: Consumer> ExactSizeIterator for PopIter<'a, C> {
fn len(&self) -> usize {
self.slices.0.len() + self.slices.1.len()
}
}
impl<'a, C: Consumer> Drop for PopIter<'a, C> {
fn drop(&mut self) {
unsafe { self.target.advance_read_index(self.len - self.len()) };
}
}
/// Iterator over ring buffer contents.
///
/// *Please do not rely on the actual type; it may change in the future.*
#[allow(type_alias_bounds)]
pub type Iter<'a, C: Consumer> = Chain<slice::Iter<'a, C::Item>, slice::Iter<'a, C::Item>>;
/// Mutable iterator over ring buffer contents.
///
/// *Please do not rely on the actual type; it may change in the future.*
#[allow(type_alias_bounds)]
pub type IterMut<'a, C: Consumer> = Chain<slice::IterMut<'a, C::Item>, slice::IterMut<'a, C::Item>>;
#[macro_export]
macro_rules! impl_consumer_traits {
($type:ident $(< $( $param:tt $( : $first_bound:tt $(+ $next_bound:tt )* )? ),+ >)?) => {
#[cfg(feature = "std")]
impl $(< $( $param $( : $first_bound $(+ $next_bound )* )? ),+ >)? std::io::Read for $type $(< $( $param ),+ >)?
where
Self: $crate::traits::Consumer<Item = u8>,
{
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
use $crate::consumer::Consumer;
let n = self.pop_slice(buffer);
            if n == 0 && !buffer.is_empty() {
Err(std::io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
}
};
}
pub trait DelegateConsumer: DelegateObserver + DelegateMut
where
Self::Base: Consumer,
{
}
impl<D: DelegateConsumer> Consumer for D
where
D::Base: Consumer,
{
#[inline]
unsafe fn set_read_index(&self, value: usize) {
self.base().set_read_index(value)
}
#[inline]
unsafe fn advance_read_index(&self, count: usize) {
self.base().advance_read_index(count)
}
#[inline]
fn occupied_slices(&self) -> (&[core::mem::MaybeUninit<Self::Item>], &[core::mem::MaybeUninit<Self::Item>]) {
self.base().occupied_slices()
}
#[inline]
unsafe fn occupied_slices_mut(&mut self) -> (&mut [core::mem::MaybeUninit<Self::Item>], &mut [core::mem::MaybeUninit<Self::Item>]) {
self.base_mut().occupied_slices_mut()
}
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
self.base().as_slices()
}
#[inline]
    fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
        self.base_mut().as_mut_slices()
    }
#[inline]
fn try_pop(&mut self) -> Option<Self::Item> {
self.base_mut().try_pop()
}
#[inline]
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
self.base_mut().pop_slice(elems)
}
#[inline]
fn iter(&self) -> Iter<'_, Self> {
self.base().iter()
}
#[inline]
fn iter_mut(&mut self) -> IterMut<'_, Self> {
self.base_mut().iter_mut()
}
#[inline]
fn skip(&mut self, count: usize) -> usize {
self.base_mut().skip(count)
}
#[inline]
fn clear(&mut self) -> usize {
self.base_mut().clear()
}
}
v4.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to interact with Schemas.
This module contains all the code related with the schemas used by this lib to decode many PackedFile types.
The basic structure of an `Schema` is:
```ignore
(
version: 3,
versioned_files: [
DB("_kv_battle_ai_ability_usage_variables_tables", [
(
version: 0,
fields: [
(
name: "key",
field_type: StringU8,
is_key: true,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
(
name: "value",
field_type: F32,
is_key: false,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
],
localised_fields: [],
),
]),
],
)
```
Inside the schema there are `VersionedFile` variants of different types, with a Vec of `Definition`, one for each version of that PackedFile supported.
!*/
use rayon::prelude::*;
use ron::de::from_bytes;
use serde_derive::{Serialize, Deserialize};
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::Path;
use crate::error::Result;
use crate::schema::Schema as SchemaV5;
use crate::schema::Definition as DefinitionV5;
use crate::schema::FieldType as FieldTypeV5;
use crate::schema::Field as FieldV5;
//---------------------------------------------------------------------------//
// Enum & Structs
//---------------------------------------------------------------------------//
/// This struct represents a Schema File in memory, ready to be used to decode versioned PackedFiles.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct SchemaV4 {
/// It stores the structural version of the Schema.
version: u16,
/// It stores the versioned files inside the Schema.
versioned_files: Vec<VersionedFileV4>
}
/// This enum defines all types of versioned files that the schema system supports.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum VersionedFileV4 {
/// It stores a `Vec<Definition>` with the definitions for each version of AnimFragment files decoded.
AnimFragment(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of AnimTable files decoded.
AnimTable(Vec<DefinitionV4>),
/// It stores the name of the table, and a `Vec<Definition>` with the definitions for each version of that table decoded.
DB(String, Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` to decode the dependencies of a PackFile.
DepManager(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of Loc files decoded (currently, only version `1`).
Loc(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of MatchedCombat files decoded.
MatchedCombat(Vec<DefinitionV4>),
}
/// This struct contains all the data needed to decode a specific version of a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Default, Serialize, Deserialize)]
pub struct DefinitionV4 {
/// The version of the PackedFile the definition is for. These versions are:
/// - `-1`: for fake `Definition`, used for dependency resolving stuff.
/// - `0`: for unversioned PackedFiles.
/// - `1+`: for versioned PackedFiles.
version: i32,
/// This is a collection of all `Field`s the PackedFile uses, in the order it uses them.
fields: Vec<FieldV4>,
/// This is a list of all the fields from this definition that are moved to a Loc PackedFile on exporting.
localised_fields: Vec<FieldV4>,
}
/// This struct holds all the relevant data to properly decode a field from a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub struct FieldV4 {
/// Name of the field. Should contain no spaces, using `_` instead.
pub name: String,
/// Type of the field.
pub field_type: FieldTypeV4,
/// `True` if the field is a `Key` field of a table. `False` otherwise.
pub is_key: bool,
/// The default value of the field.
pub default_value: Option<String>,
/// If the field's data corresponds to a filename.
pub is_filename: bool,
/// Path where the file in the data of the field can be, if it's restricted to one path.
pub filename_relative_path: Option<String>,
/// `Some(referenced_table, referenced_column)` if the field is referencing another table/column. `None` otherwise.
pub is_reference: Option<(String, String)>,
/// `Some(referenced_columns)` if the field is using another column/s from the referenced table for lookup values.
pub lookup: Option<Vec<String>>,
/// Clarifying description of what the field is for.
pub description: String,
/// Visual position in CA's Table. `-1` means we don't know its position.
pub ca_order: i16,
/// Variable to tell if this column is a bitwise column (spread across multiple columns) or not. Only applicable to numeric fields.
pub is_bitwise: i32,
/// Variable that specifies the "Enum" values for each value in this field.
pub enum_values: BTreeMap<i32, String>,
/// If the field is part of a 3-part RGB column set, and which one (R, G or B) it is.
pub is_part_of_colour: Option<u8>,
}
/// This enum defines every type of field the lib can encode/decode.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub enum FieldTypeV4 {
Boolean,
F32,
F64,
I16,
I32,
I64,
ColourRGB,
StringU8,
StringU16,
OptionalStringU8,
OptionalStringU16,
SequenceU16(Box<DefinitionV4>),
SequenceU32(Box<DefinitionV4>)
}
/// This struct represents a bunch of Schema Patches in memory.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatches {
/// It stores the patches split by games.
patches: HashMap<String, SchemaPatch>
}
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatch{
/// It stores a list of per-table, per-column patches.
tables: HashMap<String, HashMap<String, HashMap<String, String>>>,
}
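// Reading the nesting in `SchemaPatch::tables`: table name -> column name -> patch key
// -> patch value. For example (illustrative values, not taken from a real schema):
//
//     patch.tables["units_tables"]["cost"]["default_value"] == "0"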
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
/// Implementation of `SchemaV4`.
impl SchemaV4 {
/// This function loads a `Schema` to memory from a file in the `schemas/` folder.
pub fn load(path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
/// This function tries to update the Schema at the provided Path to a more recent format.
pub fn update(schema_path: &Path, patches_path: &Path, game_name: &str) -> Result<()> {
let schema_legacy = Self::load(schema_path)?;
let mut schema = SchemaV5::from(&schema_legacy);
// Fix for empty dependencies, again.
schema.definitions.par_iter_mut().for_each(|(table_name, definitions)| {
definitions.iter_mut().for_each(|definition| {
definition.fields.iter_mut().for_each(|field| {
if let Some((ref_table, ref_column)) = field.is_reference(None) {
if ref_table.trim().is_empty() || ref_column.trim().is_empty() {
dbg!(&table_name);
dbg!(field.name());
field.is_reference = None;
}
}
})
})
});
let schema_patches = SchemaPatches::load(patches_path);
if let Ok(schema_patches) = schema_patches {
if let Some(patches) = schema_patches.patches.get(game_name) {
schema.patches = patches.tables.clone();
}
}
// Disable saving until 4.0 releases.
schema.save(schema_path)?;
Ok(())
}
}
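// A minimal usage sketch of the functions above (the paths and game key are
// hypothetical, shown for illustration only):
//
//     let legacy = SchemaV4::load(Path::new("schemas/schema_wh2.ron"))?;
//     SchemaV4::update(
//         Path::new("schemas/schema_wh2.ron"),
//         Path::new("schemas/patches.ron"),
//         "warhammer_2",
//     )?;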
/// Implementation of `Definition`.
impl DefinitionV4 {
/// This function creates a new empty `Definition` for the version provided.
pub fn new(version: i32) -> DefinitionV4 {
DefinitionV4 {
version,
localised_fields: vec![],
fields: vec![],
}
}
/// This function returns the version of the provided definition.
pub fn version(&self) -> i32 {
self.version
}
/// This function returns a mutable reference to the list of fields in the definition.
pub fn fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.fields
}
/// This function returns the localised fields of the provided definition
pub fn localised_fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.localised_fields
}
}
/// Default implementation of `FieldV4`.
impl Default for FieldV4 {
fn default() -> Self {
Self {
name: String::from("new_field"),
field_type: FieldTypeV4::StringU8,
is_key: false,
default_value: None,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: String::from(""),
ca_order: -1,
is_bitwise: 0,
enum_values: BTreeMap::new(),
is_part_of_colour: None,
}
}
}
/// Default implementation of `SchemaV4`.
impl Default for SchemaV4 {
fn default() -> Self {
Self {
version: 3,
versioned_files: vec![]
}
}
}
impl From<&SchemaV4> for SchemaV5 {
fn from(legacy_schema: &SchemaV4) -> Self {
let mut schema = Self::default();
legacy_schema.versioned_files.iter()
.filter_map(|versioned| if let VersionedFileV4::DB(name, definitions) = versioned { Some((name, definitions)) } else { None })
.for_each(|(name, definitions)| {
definitions.iter().for_each(|definition| {
schema.add_definition(name, &From::from(definition));
})
});
schema
}
}
impl From<&DefinitionV4> for DefinitionV5 {
    fn from(legacy_table_definition: &DefinitionV4) -> Self {
let mut definition = Self::new(legacy_table_definition.version, None);
let fields = legacy_table_definition.fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_fields(fields);
let fields = legacy_table_definition.localised_fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_localised_fields(fields);
definition
}
}
impl From<&FieldV4> for FieldV5 {
fn from(legacy_field: &FieldV4) -> Self {
Self {
name: legacy_field.name.to_owned(),
field_type: From::from(&legacy_field.field_type),
is_key: legacy_field.is_key,
default_value: legacy_field.default_value.clone(),
is_filename: legacy_field.is_filename,
filename_relative_path: legacy_field.filename_relative_path.clone(),
is_reference: legacy_field.is_reference.clone(),
lookup: legacy_field.lookup.clone(),
description: legacy_field.description.to_owned(),
ca_order: legacy_field.ca_order,
..Default::default()
}
}
}
impl From<&FieldTypeV4> for FieldTypeV5 {
fn from(legacy_field_type: &FieldTypeV4) -> Self {
match legacy_field_type {
FieldTypeV4::Boolean => Self::Boolean,
FieldTypeV4::I16 => Self::I16,
FieldTypeV4::I32 => Self::I32,
FieldTypeV4::I64 => Self::I64,
FieldTypeV4::F32 => Self::F32,
FieldTypeV4::F64 => Self::F64,
FieldTypeV4::ColourRGB => Self::ColourRGB,
FieldTypeV4::StringU8 => Self::StringU8,
FieldTypeV4::StringU16 => Self::StringU16,
FieldTypeV4::OptionalStringU8 => Self::OptionalStringU8,
FieldTypeV4::OptionalStringU16 => Self::OptionalStringU16,
FieldTypeV4::SequenceU16(sequence) => Self::SequenceU16(Box::new(From::from(&**sequence))),
FieldTypeV4::SequenceU32(sequence) => Self::SequenceU32(Box::new(From::from(&**sequence))),
}
}
}
impl SchemaPatches {
/// This function loads a `SchemaPatches` to memory from a file in the `schemas/` folder.
pub fn load(file_path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(file_path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
}
traits.rs | thing. Clearly, this could lead to naming conflicts! But since Rust makes you import the
/// traits you plan to use, crates are free to take advantage of this superpower, and conflicts are
/// rare in practice.
///
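/// For example, any crate can bolt a new method onto `str` through a trait of its
/// own (a made-up extension trait, in the spirit of this passage):
///
///     trait Shout { fn shout(&self) -> String; }
///     impl Shout for str {
///         fn shout(&self) -> String { self.to_uppercase() + "!" }
///     }
///
///     // The method is callable only in scopes where `Shout` has been imported.
///     assert_eq!("hello".shout(), "HELLO!");
///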
/// The reason Clone and Iterator methods work without any special imports is that they’re always
/// in scope by default: they’re part of the standard prelude, names that Rust automatically
/// imports into every module. In fact, the prelude is mostly a carefully chosen selection of
/// traits.
///
///
/// ## when to use which (trait objects vs generic functions)
///
/// * dynamic dispatch: trait objects
/// * static dispatch: generic functions with trait bounds
///
/// How to understand "trait object"? Trait objects are very similar to how Java does dynamic
/// dispatch, i.e. "polymorphism". In Java, you can have references that point to various subtypes of
/// an interface. When you call methods on the reference, depending on the concrete subtype, a
/// different implementation may get invoked. That's called "dynamic dispatch". Trait objects are
/// equivalent to those references in Java and you can use "trait objects" to do dynamic dispatch.
///
/// Both features are based on traits. They have a lot in common but there are subtle differences.
///
/// 1. Trait objects are the right choice when you need a collection of values of mixed types, all together.
///
/// trait Vegetable {...}
///
/// struct Salad<V: Vegetable> {
/// veggies: Vec<V>
/// }
///
/// This works but each such salad consists entirely of a single type of vegetable.
///
/// struct Salad {
///     veggies: Vec<dyn Vegetable> // error: `dyn Vegetable` does not have
/// // a constant size
/// }
///
///
/// struct Salad {
///     veggies: Vec<Box<dyn Vegetable>>
/// }
///
/// This code works because each Box<dyn Vegetable> can own any type of vegetable, but the box itself
/// has a constant size (two pointers), suitable for storing in a vector. A runnable
/// sketch of this pattern follows this list.
///
/// 2. Another possible reason to use trait objects is to reduce the total amount of compiled code.
/// Rust may have to compile a generic function many times, once for each type it’s used with.
/// This could make the binary large, a phenomenon called code bloat in C++ circles.
///
/// ### when to use generic functions
///
/// Generics have two important advantages over trait objects, with the result that in Rust,
/// generics are the more common choice.
///
/// 1. The first advantage is speed. Each time the Rust compiler generates machine code for a
/// generic function, it knows which types it’s working with, so it knows at that time which
/// write method to call. This is called "static dispatch", in contrast to "dynamic dispatch".
///
/// Compare that to the behavior with trait objects. Rust never knows what type of value a trait
/// object points to until run time.
///
/// 2. The second advantage of generics is that not every trait can support trait objects. Traits
/// support several features, such as static methods, that work only with generics: they rule out
/// trait objects entirely.
///
/// You can only make "object-safe traits" into trait objects. Some complex rules govern all the
/// properties that make a trait object safe, but in practice, only two rules are relevant. A
/// trait is object safe if all the methods defined in the trait have the following properties:
///
/// * The return type isn’t Self.
/// * There are no generic type parameters.
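// A minimal sketch of the two object-safety rules above. `Paintable` and
// `Spawner` are illustrative traits added here, not part of the original file:
// `Paintable` is object safe, so `Box<dyn Paintable>` works, while
// `Spawner::spawn` returns Self, so the compiler rejects `dyn Spawner`.
pub trait Paintable {
    fn paint(&self);
}
pub trait Spawner {
    fn spawn(&self) -> Self; // returns Self => not object safe
}
pub fn paint_all(items: &[Box<dyn Paintable>]) {
    for item in items {
        item.paint(); // dynamic dispatch through the vtable
    }
}
// fn broken(_: Box<dyn Spawner>) {} // error: `Spawner` cannot be made into an object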
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::io;
use std::io::Write;
use std::ops::Mul;
pub fn run() {
let tweet = Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
};
println!("1 new tweet: {}", tweet.summarize());
let article = NewsArticle {
headline: String::from("Make America Great Again"),
location: String::from("Washington DC"),
author: String::from("Trump"),
content: String::from("Make America Great Again"),
};
println!("1 news article: {}", article.summarize3());
notify(tweet);
notify2(article);
}
pub trait Summary {
fn summarize(&self) -> String;
// A trait can have methods with a default implementation,
// which can be overridden by the types that implement the trait.
fn summarize2(&self) -> String {
String::from("(Read more...)")
}
// Default implementations can call other methods in the same trait, even if those other
// methods don’t have a default implementation. In this way, a trait can provide a lot of
// useful functionality and only require implementors to specify a small part of it.
// This is the "template pattern". The template itself is implemented in the trait while
// various hooks are implemented by the types themselves.
fn summarize3(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
fn summarize_author(&self) -> String;
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!("{}, by {} ({})", self.headline, self.author, self.location)
}
fn summarize_author(&self) -> String {
format!("by {}", self.author)
}
}
impl NewsArticle {
// You can't define this function in the "impl Summary for NewsArticle" block
// because it's not part of the Summary trait!
pub fn get_headline(&self) -> &String {
&self.headline
}
}
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}: {}", self.username, self.content)
}
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
}
// traits as parameters
// this function can be called with any type that implements Summary
pub fn notify(item: impl Summary) {
println!("Breaking news! {}", item.summarize());
}
// "trait bound"
// this is equivalent to the function above, which is actually syntax sugar
pub fn notify2<T: Summary>(item: T) {
println!("Breaking news! {}", item.summarize());
}
pub trait Display {
fn show(&self) -> String;
}
// specify multiple traits using +
pub fn notify3<T: Summary + Display>(item: T) {
println!("Breaking news! {}", item.summarize());
println!("Show me the item: {}", item.show());
}
// "trait bound" using "where" clause between return type and open curly brace
// this is easier to read when you have many trait bounds
pub fn some_function<T, U>(_t: T, _u: U) -> i32
where
T: Display + Clone,
U: Clone + Summary,
{
99
}
// returning types that implement traits
pub fn returns_summarizable() -> impl Summary {
Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
}
}
// This is a plain function that takes a "trait object".
pub fn say_hello(out: &mut dyn Write) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// In contrast, this is a generic function whose type parameter W is bound by "Write" trait.
pub fn say_hello2<W: Write>(out: &mut W) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
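// A hedged usage sketch (demo function added for illustration): both calls
// write to the same Vec<u8>, but say_hello goes through a &mut dyn Write fat
// pointer (dynamic dispatch) while say_hello2 is monomorphized for Vec<u8>
// (static dispatch).
pub fn demo_say_hello() -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    say_hello(&mut buf)?; // &mut Vec<u8> coerces to &mut dyn Write
    say_hello2(&mut buf)?; // W is inferred as Vec<u8>
    assert_eq!(buf.as_slice(), b"hello world\nhello world\n".as_slice());
    Ok(())
}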
// Find the top ten occurring elements of a slice.
// This is how to specify a type parameter that implements multiple traits.
pub fn top_ten<T>(values: &[T]) -> Vec<&T>
where
T: Debug + Hash + Eq,
{
let mut map = HashMap::new();
for value in values {
let counter = map.entry(value).or_insert(0);
*counter += 1;
}
let mut map_vec: Vec<_> = map.into_iter().collect();
map_vec.sort_by(|a, b| b.1.cmp(&a.1));
map_vec.into_iter().map(|a| a.0).take(10).collect()
}
pub trait Mapper {}
pub trait Reducer {}
pub trait Serialize {}
pub struct DataSet {}
// Generic functions can have multiple type parameters: M and R.
pub fn run_query<M: Mapper + Serialize, R: Reducer + Serialize>(
_data: &DataSet,
_map: M,
_reduce: R,
) {
unimplemented!()
}
// Alternative syntax: bounds can be specified in the where clause
pub fn run_query2<M, R>(_data: &DataSet, _map: M, _reduce: R)
where
M: Mapper + Serialize,
R: Reducer + Serialize,
{
unimplemented!()
}
pub trait MeasureDistance {}
// A generic function can have both lifetime parameters and type parameters. Lifetime parameters
// come first.
pub fn nearest<'t, 'c, P>(_target: &'t P, _candidates: &'c [P]) -> &'c P
where
P: MeasureDistance,
{
unimplemented!()
}
/// This is a generic function. It works with parameters that implement the "Ord" trait.
/// The compiler generates custom machine code for each type T that you actually use.
pub fn min<T: Ord>(m: T, n: T) -> T {
if m < n {
m
} else {
n
}
}
/// Rust lets you implement any trait on any type, as long as either the trait or the type is
/// introduced in the current crate. This means that any time you want to add a method to any type,
/// you can use a trait to do it. This is called an "extension trait".
pub trait IsEmoji {
fn is_emoji(&self) -> bool;
}
impl IsEmoji for char {
fn is_emoji(&self) -> bool {
unimplemented!()
}
}
/// We said earlier that when you implement a trait, either the trait or the type must be new in
/// the current crate. This is called the "coherence rule". It helps Rust ensure that trait
/// implementations are unique. Your code can’t "impl Write for u8", because both Write and u8 are
/// defined in the standard library. If Rust let crates do that, there could be multiple
/// implementations of Write for u8, in different crates, and Rust would have no reasonable way to
/// decide which implementation to use for a given method call.
///
/// You can even use a generic impl block to add an extension trait to a whole family of types at once.
pub struct HtmlDocument {}
pub trait WriteHtml {
fn write_html(&mut self, html: &HtmlDocument) -> io::Result<()>;
}
/// You can write HTML to any std::io writer.
impl<W: Write> WriteHtml for W {
fn write_html(&mut self, _html: &HtmlDocument) -> io::Result<()> {
unimplemented!()
}
}
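// Usage sketch (demo function added for illustration): thanks to the blanket
// impl above, any Write value, e.g. a Vec<u8>, automatically gains write_html.
// Actually calling it would panic here, because the body is unimplemented!().
pub fn demo_write_html() -> io::Result<()> {
    let mut buf: Vec<u8> = Vec::new(); // Vec<u8>: Write, so WriteHtml applies
    buf.write_html(&HtmlDocument {})
}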
/// Self in traits
///
/// A trait can use the keyword Self as a type. It stands for the concrete type that implements the trait.
pub trait MyClone {
fn clone(&self) -> Self;
}
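// Sketch with a hypothetical type (Point2 is not in the original file): inside
// the impl, Self resolves to the implementing type, so clone returns a Point2.
// Note that the "-> Self" return type also makes MyClone not object safe.
#[derive(Debug, PartialEq)]
pub struct Point2 {
    x: i32,
    y: i32,
}
impl MyClone for Point2 {
    fn clone(&self) -> Self {
        Point2 { x: self.x, y: self.y }
    }
}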
/// Subtraits: we can define a trait as an extension of another trait.
/// This means that every type that implements Creature must also implement the Display trait.
pub trait Creature: Display {
fn position(&self) -> (i32, i32);
}
// impl Display for Broom {}
// impl Creature for Broom {}
pub trait Animal {
// Instance methods
fn name(&self) -> &'static str;
fn noise(&self) -> &'static str;
// Traits can provide default implementation.
fn talk(&self) {
println!("{} says {}", self.name(), self.noise());
}
}
pub struct Sheep {
naked: bool,
name: &'static str,
}
impl Sheep {
fn is_naked(&self) -> bool {
self.naked
}
pub fn shear(&mut self) {
if self.is_naked() {
// You can call the trait method "name()" here because Sheep implements
// the Animal trait.
println!("{} is already naked...", self.name());
} else {
println!("{} gets a haircut!", self.name);
self.naked = true;
}
}
}
impl Animal for Sheep {
fn name(&self) -> &'static str {
self.name
}
fn noise(&self) -> &'static str {
if self.is_naked() {
"baaaaaa?"
} else {
"baaaaaa!"
}
}
// Default implementation can be overridden.
fn talk(&self) {
println!("{} pauses briefly... {}", self.name(), self.noise());
}
}
/// The compiler is capable of providing basic implementations for some traits via
/// the #[derive] attribute. The following is a list of derivable traits:
///
/// * Comparison traits: Eq, PartialEq, Ord, PartialOrd.
/// * Clone: to create T from &T via a copy.
/// * Copy: to give a type 'copy semantics' instead of 'move semantics'.
/// * Hash: to compute a hash from &T.
/// * Default: to create an empty instance of a data type.
/// * Debug: to format a value using the {:?} formatter.
///
/// Returning Traits with "dyn"
///
/// https://doc.rust-lang.org/edition-guide/rust-2018/trait-system/dyn-trait-for-trait-objects.html
///
/// The Rust compiler needs to know how much space every function's return type requires. This
/// means all your functions have to return a concrete type. Unlike other languages, if you have a
/// trait like Animal, you can't write a function that returns Animal, because its different
/// implementations will need different amounts of memory.
///
/// However, there's an easy workaround. Instead of returning a trait object directly, our
/// functions return a Box which contains some Animal. A box is just a reference to some memory in
/// the heap. Because a reference has a statically-known size, and the compiler can guarantee it
/// points to a heap-allocated Animal, we can return a trait from our function!
///
/// Rust tries to be as explicit as possible whenever it allocates memory on the heap. So if your
/// function returns a pointer-to-trait-on-heap in this way, you need to write the return type with
/// the dyn keyword, e.g. Box<dyn Animal>.
| fn name(&self) -> &'static str {
"Dave"
}
fn noise(&self) -> &'static str {
"Moo"
}
}
pub fn random_animal(random_number: f64) -> Box<dyn Animal> {
if random_number < 0.5 {
Box::new(Sheep {
name: "Bob",
naked: true,
})
} else {
Box::new(Cow {})
}
}
/// Operator overloading
/// https://doc.rust-lang.org/core/ops/
/// In Rust, many of the operators can be overloaded via traits. That is, some operators can be used to accomplish different tasks based on their input arguments. This is possible because operators are syntactic sugar for method calls. For example, the + operator in a + b calls the add method (as in a.add(b)). This add method is part of the Add trait. Hence, the + operator can be used by any implementor of the Add trait.
pub struct Rectangle {
width: u32,
height: u32,
}
impl Mul<u32> for Rectangle {
type Output = Self;
fn mul(self, times: u32) -> Self::Output {
Rectangle {
width: self.width * times,
height: self.height * times,
}
}
}
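// Usage sketch (demo function added for illustration): "rect * 2" is syntactic
// sugar for "rect.mul(2)", resolved through the Mul<u32> impl above.
pub fn demo_rectangle_mul() {
    let rect = Rectangle {
        width: 3,
        height: 4,
    };
    let scaled = rect * 2; // consumes rect and returns a new Rectangle
    assert_eq!((scaled.width, scaled.height), (6, 8));
}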
/// impl Trait
/// If your function returns a type that implements MyTrait, you can write its return
/// type as -> impl MyTrait. This can help simplify your type signatures quite a lot!
pub fn combine_vecs(v: Vec<i32>, u: Vec<i32>) -> impl Iterator<Item = i32> {
// You could also write the following which is a lot more complicated.
// -> std::iter::Chain<std::vec::IntoIter<i32>, std::vec::IntoIter<i32>> {
v.into_iter().chain(u.into_iter())
}
/// More importantly, some Rust types can't be written out. For example, every
/// closure has its own unnamed concrete type. Before impl Trait syntax, you had
/// to allocate on the heap in order to return a closure. But now you can do it
/// all statically, like this:
pub fn make_adder(y: i32) -> impl Fn(i32) -> i32 {
move |x: i32| x + y
}
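// Usage sketch (demo function added for illustration): the closure captures y
// by value via "move", and its unnamed concrete type stays behind
// impl Fn(i32) -> i32 with no heap allocation.
pub fn demo_impl_trait() {
    let add5 = make_adder(5);
    assert_eq!(add5(2), 7);
    let combined: Vec<i32> = combine_vecs(vec![1, 2], vec![3, 4]).collect();
    assert_eq!(combined, vec![1, 2, 3, 4]);
}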
/// Polymorphism via trait objects
pub trait Draw {
fn draw(&self);
}
pub struct Screen {
pub components: Vec<Box<dyn Draw>>,
}
impl Default for Screen {
fn default() -> Self {
Screen { components: vec![] }
}
}
impl Screen {
pub fn run(&self) {
for component in self.components.iter() {
component.draw();
}
}
pub fn add_component(&mut self, draw: Box<dyn Draw>) -> &mut Self {
self.components.push(draw);
self
}
}
#[derive(Debug)]
pub struct Button {
pub width: u32,
pub height: u32,
pub label: String,
}
impl Draw for Button {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
#[derive(Debug)]
struct SelectBox {
width: u32,
height: u32,
options: Vec<String>,
}
impl Draw for SelectBox {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
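// Usage sketch (demo function added for illustration): because components is a
// Vec<Box<dyn Draw>>, one collection mixes Buttons and SelectBoxes, and each
// draw() call is dispatched dynamically through the vtable.
pub fn demo_screen() {
    let mut screen = Screen::default();
    screen.add_component(Box::new(Button {
        width: 10,
        height: 4,
        label: String::from("OK"),
    }));
    screen.add_component(Box::new(SelectBox {
        width: 20,
        height: 8,
        options: vec![String::from("Yes"), String::from("No")],
    }));
    screen.run();
}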
/// Implement the "state pattern" via trait objects.
trait State {
// "self: Box<Self>" means that the method is only valid when called on a Box holding the type.
fn request_review(self: Box<Self>) -> Box<dyn State>;
fn approve(self: Box<Self>) -> Box<dyn State>;
fn reject(self: Box<Self>) -> Box<dyn State>;
fn content<'a>(&self, _post: &'a Post) -> &'a str {
""
}
}
struct Draft {}
/// Now we can start seeing the advantages of the state pattern: the request_review method on
/// Post is the same no matter its state value. Each state is responsible for its own rules.
impl State for Draft {
fn request_review(self: Box<Self>) -> Box<dyn State> {
Box::new(PendingReview {})
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
}
struct PendingReview {}
impl State for PendingReview {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
Box::new(Published {})
}
fn reject(self: Box<Self>) -> Box<dyn State> {
Box::new(Draft {})
}
}
struct Published {}
impl State for Published {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
fn content<'a>(&self, post: &'a Post) -> &'a str {
post.content.as_ref()
}
}
pub struct Post {
/// To consume the old state, the request_review method needs to take ownership of the state
/// value. This is where the Option in the state field of Post comes in: we call the take
/// method to take the Some value out of the state field and leave a None in its place,
/// because Rust doesn’t let us have unpopulated fields in structs. This lets us move the
/// state value out of Post rather than borrowing it. Then we’ll set the post’s state value
/// to the result of this operation.
state: Option<Box<dyn State>>,
content: String,
}
/// Post knows nothing about the various behaviors. It relies on various State objects to do
/// their jobs.
impl Default for Post {
fn default() -> Self {
Self {
state: Some(Box::new(Draft {})),
content: String::new(),
}
}
}
impl Post {
// This behavior doesn’t depend on the state the post is in, so it’s not part of the state
// pattern. The add_text method doesn’t interact with the state field at all, but it is part
// of the behavior we want to support.
pub fn add_text(&mut self, text: &str) {
self.content.push_str(text);
}
pub fn content(&self) -> &str {
match &self.state {
Some(s) => s.content(self),
None => "",
}
}
pub fn request_review(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.request_review());
}
}
pub fn reject(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.reject());
}
}
pub fn approve(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.approve());
}
}
}
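// Workflow sketch (demo function added for illustration, mirroring the Rust
// book's example): a post starts as a Draft and hides its content until it has
// been reviewed and approved; every transition is delegated to the boxed State.
pub fn demo_post_workflow() {
    let mut post = Post::default();
    post.add_text("I ate a salad for lunch today");
    assert_eq!("", post.content()); // Draft hides the content
    post.request_review();
    assert_eq!("", post.content()); // PendingReview still hides it
    post.approve();
    assert_eq!("I ate a salad for lunch today", post.content()); // Published
}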
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_min() {
assert_eq!(min(3, 5), 3);
assert_eq!(min(30, 5), 5);
}
#[test]
fn traits_need_to_be_in_scope() {
// The Write trait needs to be in scope. Otherwise, all its methods (such as "write_all")
// are hidden.
use std::io::Write;
let mut buf: Vec<u8> = vec![];
buf.write_all(b"hello").unwrap();
assert_eq!(5, buf.len());
// Note that Vec<u8> implements the Write trait but Vec<String> does not, so the code below doesn't compile!
// let mut buf: Vec<String> = vec![];
// buf.write_all("hello world").unwrap();
// assert_eq!(11, buf.len());
}
#[test]
fn trait_objects() {
let mut buf: Vec<u8> = vec![];
// Rust doesn’t permit variables of type Write!
// This line doesn't compile because a variable's size has to be known at compile time and
// types that implement Write can be any size.
// let writer: Write = buf;
// A reference to a trait type, like writer, is called a "trait object". Like any other
// reference, a trait object points to some value, it has a lifetime, and it can be either
// mut or shared. The size of a reference is fixed!
let _writer: &mut dyn Write = &mut buf;
// What makes a trait object different is that Rust usually doesn’t know the type of the
// referent at compile time. So a trait object includes a little extra information about
// the referent’s type. This is strictly for Rust’s own use behind the scenes: when you
// call writer.write(data), Rust needs the type information to dynamically call the right
// write method depending on the type of *writer. You can’t query the type information
// directly, and Rust does not support downcasting from the trait object &mut Write back to
// a concrete type like Vec<u8>. In other words, you can only work with the "generic type"
// of the trait itself.
//
// In memory, a trait object is a "fat pointer" consisting of two pointers:
// 1. data pointer: a pointer to the value, plus
// 2. vtable pointer: a pointer to a table representing that value's type.
// (Vec<u8> in this example)
//
// A vtable is essentially a struct of function pointers, pointing to the concrete piece of
// machine code for each method in the implementation. A method call like
// trait_object.method() will retrieve the correct pointer out of the vtable and then do a
// dynamic call of it.
// Rust automatically converts ordinary references into trait objects when needed. Let's
// say "say_hello" is a function that takes a "&mut Write", this works:
//
// let mut local_file: File = File::create("hello.txt")?;
// say_hello(&mut local_file)?; // Rust converts "&mut File" to "&mut Write"
// This kind of conversion is the only way to create a trait object. What the computer is
// actually doing here is very simple. At the point where the conversion happens, Rust
// knows the referent’s true type (in this case, File), so it just adds the address of the
// appropriate "vtable", turning the regular pointer into a fat pointer.
}
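#[test]
fn implicit_trait_object_coercion() {
    // A small sketch (test added for illustration): Rust coerces &mut Vec<u8>
    // to &mut dyn Write at the call site by attaching the vtable pointer,
    // which is the only way a trait object ever gets created.
    let mut buf: Vec<u8> = vec![];
    say_hello(&mut buf).unwrap();
    assert_eq!(b"hello world\n".to_vec(), buf);
}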
#[test]
fn test_generic_functions() {}
#[test]
fn test_top_ten() {
let names = vec![
String::from("Oakland"),
String::from("Oakland"),
String::from("Oakland"),
String::from("Alameda"),
String::from("San Francisco"),
String::from("San Francisco"),
];
let top10 = top_ten(&names);
assert_eq!(vec!["Oakland", "San Francisco", "Alameda"], top10);
}
#[test]
fn test_sheep() {
let mut sheep = Sheep {
naked: true,
name: "Dolly",
};
sheep.talk();
sheep.shear();
sheep.talk();
}
#[test]
fn return_trait_object() {
let animal = random_animal(0.3);
assert_eq!("baaaaaa?", animal.noise());
}
| pub struct Cow {}
impl Animal for Cow { | random_line_split |
// mut or shared. The size of a reference is fixed!
let _writer: &mut dyn Write = &mut buf;
// What makes a trait object different is that Rust usually doesn’t know the type of the
// referent at compile time. So a trait object includes a little extra information about
// the referent’s type. This is strictly for Rust’s own use behind the scenes: when you
// call writer.write(data), Rust needs the type information to dynamically call the right
// write method depending on the type of *writer. You can’t query the type information
// directly, and Rust does not support downcasting from the trait object &mut Write back to
// a concrete type like Vec<u8>. In other words, you can only work with the "generic type"
// of the trait itself.
//
// In memory, a trait object is a "fat pointer" consisting of two pointers:
// 1. data pointer: a pointer to the value, plus
// 2. vtable pointer: a pointer to a table representing that value's type.
// (Vec<u8> in this example)
//
// A vtable is essentially a struct of function pointers, pointing to the concrete piece of
// machine code for each method in the implementation. A method call like
// trait_object.method() will retrieve the correct pointer out of the vtable and then do a
// dynamic call of it.
// Rust automatically converts ordinary references into trait objects when needed. Let's
// say "say_hello" is a function that takes a "&mut Write", this works:
//
// let mut local_file: File = File::create("hello.txt")?;
// say_hello(&mut local_file)?; // Rust converts "&mut File" to "&mut Write"
// This kind of conversion is the only way to create a trait object. What the computer is
// actually doing here is very simple. At the point where the conversion happens, Rust
// knows the referent’s true type (in this case, File), so it just adds the address of the
// appropriate "vtable", turning the regular pointer into a fat pointer.
}
#[test]
fn test_generic_functions() {}
#[test]
fn test_top_ten() {
let names = vec![
String::from("Oakland"),
String::from("Oakland"),
String::from("Oakland"),
String::from("Alameda"),
String::from("San Francisco"),
String::from("San Francisco"),
];
let top10 = top_ten(&names);
assert_eq!(vec!["Oakland", "San Francisco", "Alameda"], top10);
}
#[test]
fn test_sheep() {
let mut sheep = Sheep {
naked: true,
name: "Dolly",
};
sheep.talk();
sheep.shear();
sheep.talk();
}
#[test]
fn return_trait_object() {
let animal = random_animal(0.3);
assert_eq!("baaaaaa?", animal.noise());
| t lets you implem | conditional_block |
// traits.rs
/// ... Clearly, this could lead to naming conflicts! But since Rust makes you import the
/// traits you plan to use, crates are free to take advantage of this superpower, and conflicts are
/// rare in practice.
///
/// The reason Clone and Iterator methods work without any special imports is that they’re always
/// in scope by default: they’re part of the standard prelude, names that Rust automatically
/// imports into every module. In fact, the prelude is mostly a carefully chosen selection of
/// traits.
///
///
/// ## when to use which (trait objects vs generic functions)
///
/// * dynamic dispatch: trait objects
/// * static dispatch: generic functions with trait bounds
///
/// How to understand "trait object"? Trait objects are very similar to how Java does dynamic
/// dispatch, i.e., "polymorphism". In Java, you can have references that point to various subtypes of
/// an interface. When you call methods on the reference, depending on the concrete subtype, a
/// different implementation may get invoked. That's called "dynamic dispatch". Trait objects are
/// equivalent to those references in Java and you can use "trait objects" to do dynamic dispatch.
///
/// Both features are based on traits. They have a lot in common but there are subtle differences.
///
/// 1. Trait objects are the right choice when you need a collection of values of mixed types, all together.
///
/// trait Vegetable {...}
///
/// struct Salad<V: Vegetable> {
/// veggies: Vec<V>
/// }
///
/// This works but each such salad consists entirely of a single type of vegetable.
///
/// struct Salad {
/// veggies: Vec<Vegetable> // error: `Vegetable` does not have
/// // a constant size
/// }
///
///
/// struct Salad {
/// veggies: Vec<Box<Vegetable>>
/// }
///
/// This code works because each Box<Vegetable> can own any type of vegetable, but the box itself
/// has a constant size—two pointers—suitable for storing in a vector.
///
/// 2. Another possible reason to use trait objects is to reduce the total amount of compiled code.
/// Rust may have to compile a generic function many times, once for each type it’s used with.
/// This could make the binary large, a phenomenon called code bloat in C++ circles.
///
/// ### when to use generic functions
///
/// Generics have two important advantages over trait objects, with the result that in Rust,
/// generics are the more common choice.
///
/// 1. The first advantage is speed. Each time the Rust compiler generates machine code for a
/// generic function, it knows which types it’s working with, so it knows at that time which
/// write method to call. This is called "static dispatch", in contrast to "dynamic dispatch".
///
/// Compare that to the behavior with trait objects. Rust never knows what type of value a trait
/// object points to until run time.
///
/// 2. The second advantage of generics is that not every trait can support trait objects. Traits
/// support several features, such as static methods, that work only with generics: they rule out
/// trait objects entirely.
///
/// You can only make "object-safe traits" into trait objects. Some complex rules govern all the
/// properties that make a trait object safe, but in practice, only two rules are relevant. A
/// trait is object safe if all the methods defined in the trait have the following properties:
///
/// * The return type isn’t Self.
/// * There are no generic type parameters.
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::io;
use std::io::Write;
use std::ops::Mul;
pub fn run() {
let tweet = Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
};
println!("1 new tweet: {}", tweet.summarize());
let article = NewsArticle {
headline: String::from("Make America Great Again"),
location: String::from("Washington DC"),
author: String::from("Trump"),
content: String::from("Make America Great Again"),
};
println!("1 news article: {}", article.summarize3());
notify(tweet);
notify2(article);
}
pub trait Summary {
fn summarize(&self) -> String;
// trait can have methods with default implementation
// this can be overridden by types that implement this trait
fn summarize2(&self) -> String {
String::from("(Read more...)")
}
// Default implementations can call other methods in the same trait, even if those other
// methods don’t have a default implementation. In this way, a trait can provide a lot of
// useful functionality and only require implementors to specify a small part of it.
// This is the "template pattern". The template itself is implemented in the trait while
// various hooks are implemented by the types themselves.
fn summarize3(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
fn summarize_author(&self) -> String;
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!("{}, by {} ({})", self.headline, self.author, self.location)
}
fn summarize_author(&self) -> String {
format!("by {}", self.author)
}
}
impl NewsArticle {
// You can't define this function in the "impl Summary for NewsArticle" block
// because it's not a function of the NewsArticle trait!
pub fn get_headline(&self) -> &String {
&self.headline
}
}
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}: {}", self.username, self.content)
}
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
}
// traits as parameters
// this function can be called with any type that implements Summary
pub fn notify(item: impl Summary) {
println!("Breaking news! {}", item.summarize());
}
// "trait bound"
// this is equivalent to the function above, which is actually syntax sugar
pub fn notify2<T: Summary>(item: T) {
println!("Breaking news! {}", item.summarize());
}
pub trait Display {
fn show(&self) -> String;
}
// specify multiple traits using +
pub fn notify3<T: Summary + Display>(item: T) {
println!("Breaking news! {}", item.summarize());
println!("Show me the item: {}", item.show());
}
// "trait bound" using "where" clause between return type and open curly brace
// this is easier to read when you have many trait bounds
pub fn some_function<T, U>(_t: T, _u: U) -> i32
where
T: Display + Clone,
U: Clone + Summary,
{
99
}
// returning types that implement traits
pub fn returns_summarizable() -> impl Summary {
Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
}
}
// This is a plain function that takes a "trait object".
pub fn say_hello(out: &mut dyn Write) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// In contrast, this is a generic function whose type parameter W is bound by "Write" trait.
pub fn say_hello2<W: Write>(out: &mut W) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
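// A minimal usage sketch (added here, not in the original file): both versions
// accept any writer, e.g. a Vec<u8> buffer. say_hello goes through a vtable
// (dynamic dispatch), while say_hello2 is monomorphized for Vec<u8>.
pub fn say_hello_demo() -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    say_hello(&mut buf)?; // "&mut Vec<u8>" coerces to the trait object "&mut dyn Write"
    say_hello2(&mut buf)?; // statically dispatched copy for W = Vec<u8>
    assert_eq!(buf, b"hello world\nhello world\n".to_vec());
    Ok(())
}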
// Find the top occurring elements from a vector.
// This is how to specify a type parameter that implements multiple traits.
pub fn top_ten<T>(values: &[T]) -> Vec<&T>
where
T: Debug + Hash + Eq,
{
let mut map = HashMap::new();
for value in values {
let counter = map.entry(value).or_insert(0);
*counter += 1;
}
let mut map_vec: Vec<_> = map.into_iter().collect();
map_vec.sort_by(|a, b| b.1.cmp(&a.1));
map_vec.into_iter().map(|a| a.0).take(10).collect()
}
pub trait Mapper {}
pub trait Reducer {}
pub trait Serialize {}
pub struct DataSet {}
// Generic functions can have multiple type parameters: M and R.
pub fn run_query<M: Mapper + Serialize, R: Reducer + Serialize>(
_data: &DataSet,
_map: M,
_reduce: R,
) {
unimplemented!()
}
// Alternative syntax: bounds can be specified in the where clause
pub fn run_query2<M, R>(_data: &DataSet, _map: M, _reduce: R)
where
M: Mapper + Serialize,
R: Reducer + Serialize,
{
unimplemented!()
}
pub trait MeasureDistance {}
// A generic function can have both lifetime parameters and type parameters. Lifetime parameters
// come first.
pub fn nearest<'t, 'c, P>(_target: &'t P, _candidates: &'c [P]) -> &'c P
where
P: MeasureDistance,
{
unimplemented!()
}
/// This is a generic function. It works with parameters that implement the "Ord" trait.
/// The compiler generates custom machine code for each type T that you actually use.
pub fn min<T: Ord>(m: T, n: T) -> T {
if m < n {
m
} else {
n
}
}
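// Sketch: each distinct T used below makes the compiler emit a separate
// monomorphized copy of `min` (one for i32, one for &str).
pub fn min_demo() {
    assert_eq!(min(3, 5), 3); // instantiates min::<i32>
    assert_eq!(min("apple", "banana"), "apple"); // instantiates min::<&str>
}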
/// Rust lets you implement any trait on any type, as long as either the trait or the type is
/// introduced in the current crate. This means that any time you want to add a method to any type,
/// you can use a trait to do it. This is called an "extension trait".
pub trait IsEmoji {
fn is_emoji(&self) -> bool;
}
impl IsEmoji for char {
fn is_emoji(&self) -> bool {
unimplemented!()
}
}
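// Sketch of using the extension trait: once IsEmoji is in scope, the method is
// callable on every char. (Calling this would hit unimplemented!(), so it only
// demonstrates that the method resolves.)
pub fn emoji_demo(c: char) -> bool {
    c.is_emoji()
}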
/// We said earlier that when you implement a trait, either the trait or the type must be new in
/// the current crate. This is called the "coherence rule". It helps Rust ensure that trait
/// implementations are unique. Your code can’t "impl Write for u8", because both Write and u8 are
/// defined in the standard library. If Rust let crates do that, there could be multiple
/// implementations of Write for u8, in different crates, and Rust would have no reasonable way to
/// decide which implementation to use for a given method call.
/// You can even use a generic impl block to add an extension trait to a whole family of types at once.
pub struct HtmlDocument {}
pub trait WriteHtml {
fn write_html(&mut self, html: &HtmlDocument) -> io::Result<()>;
}
/// You can write HTML to any std::io writer.
impl<W: Write> WriteHtml for W {
fn write_html(&mut self, _html: &HtmlDocument) -> io::Result<()> {
unimplemented!()
}
}
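// Sketch: thanks to the blanket impl above, any io::Write value (Vec<u8> here)
// gains write_html. As with is_emoji, the body is unimplemented!(), so this
// only demonstrates that the types line up.
pub fn html_demo(mut out: Vec<u8>, doc: &HtmlDocument) -> io::Result<()> {
    out.write_html(doc)
}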
/// Self in traits
///
/// A trait can use the keyword Self as a type. It represents the trait itself.
pub trait MyClone {
fn clone(&self) -> Self;
}
/// Subtraits: we can define a trait is an extension of another trait
/// This means that every type that implements Creature must also implement the Display trait.
pub trait Creature: Display {
fn position(&self) -> (i32, i32);
}
// impl Display for Broom {}
// impl Creature for Broom {}
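// A compilable version of the commented-out sketch above; Broom is a
// hypothetical type introduced only for illustration. The Creature impl is
// rejected by the compiler unless the Display impl (the local trait defined
// above, not std::fmt::Display) is also present.
pub struct Broom;
impl Display for Broom {
    fn show(&self) -> String {
        "broom".to_owned()
    }
}
impl Creature for Broom {
    fn position(&self) -> (i32, i32) {
        (0, 0)
    }
}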
pub trait Animal {
// Instance methods
fn name(&self) -> &'static str;
fn noise(&self) -> &'static str;
// Traits can provide default implementation.
fn talk(&self) {
println!("{} says {}", self.name(), self.noise());
}
}
pub struct Sheep {
naked: bool,
name: &'static str,
}
impl Sheep {
fn is_naked(&self) -> bool {
self.naked
}
pub fn shear(&mut self) {
if self.is_naked() {
// You can call the trait method "name()" here because Sheep implements
// the Animal trait.
println!("{} is already naked...", self.name());
} else {
println!("{} gets a haircut!", self.name);
self.naked = true;
}
}
}
impl Animal for Sheep {
fn name(&self) -> &'static str {
self.name
}
fn noise(&self) -> &'static str {
if self.is_naked() {
"baaaaaa?"
} else {
"baaaaaa!"
}
}
// Default implementation can be overridden.
fn talk(&self) {
println!("{} pauses briefly... {}", self.name(), self.noise());
}
}
/// The compiler is capable of providing basic implementations for some traits via
/// the #[derive] attribute. The following is a list of derivable traits:
///
/// * Comparison traits: Eq, PartialEq, Ord, PartialOrd.
/// * Clone: to create T from &T via a copy.
/// * Copy: to give a type 'copy semantics' instead of 'move semantics'.
/// * Hash: to compute a hash from &T.
/// * Default: to create an empty instance of a data type.
/// * Debug: to format a value using the {:?} formatter.
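// Sketch: deriving several of the traits listed above on a hypothetical Point
// type. PartialEq/Eq enable ==, Debug enables {:?}, Clone/Copy give copy
// semantics, Hash allows use as a HashMap key, Default gives Point::default().
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub struct Point {
    x: i32,
    y: i32,
}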
/// Returning Traits with "dyn"
///
/// https://doc.rust-lang.org/edition-guide/rust-2018/trait-system/dyn-trait-for-trait-objects.html
///
/// The Rust compiler needs to know how much space every function's return type requires. This
/// means all your functions have to return a concrete type. Unlike other languages, if you have a
/// trait like Animal, you can't write a function that returns Animal, because its different
/// implementations will need different amounts of memory.
///
/// However, there's an easy workaround. Instead of returning a trait object directly, our
/// functions return a Box which contains some Animal. A box is just a reference to some memory in
/// the heap. Because a reference has a statically-known size, and the compiler can guarantee it
/// points to a heap-allocated Animal, we can return a trait from our function!
///
/// Rust tries to be as explicit as possible whenever it allocates memory on the heap. So if your
/// function returns a pointer-to-trait-on-heap in this way, you need to write the return type with
/// the dyn keyword, e.g. Box<dyn Animal>.
pub struct Cow {}
impl Animal for Cow {
fn name(&self) -> &'static str {
"Dave"
}
fn noise(&self) -> &'static str {
"Moo"
}
}
pub fn random_animal(random_number: f64) -> Box<dyn Animal> {
if random_number < 0.5 {
Box::new(Sheep {
name: "Bob",
naked: true,
})
} else {
Box::new(Cow {})
}
}
/// Operator overloading
/// https://doc.rust-lang.org/core/ops/
/// In Rust, many of the operators can be overloaded via traits. That is, some operators can be used to accomplish different tasks based on their input arguments. This is possible because operators are syntactic sugar for method calls. For example, the + operator in a + b calls the add method (as in a.add(b)). This add method is part of the Add trait. Hence, the + operator can be used by any implementor of the Add trait.
pub struct Rectangle {
width: u32,
height: u32,
}
impl Mul<u32> for Rectangle {
type Output = Self;
fn mul(self, times: u32) -> Self::Output {
Rectangle {
width: self.width * times,
height: self.height * times,
}
}
}
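// Sketch: with the Mul impl above, `r * 2` is sugar for r.mul(2).
pub fn scale_demo() -> Rectangle {
    let r = Rectangle {
        width: 2,
        height: 3,
    };
    r * 2 // a 4 x 6 rectangle
}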
/// impl Trait
/// If your function returns a type that implements MyTrait, you can write its return
/// type as -> impl MyTrait. This can help simplify your type signatures quite a lot!
pub fn combine_vecs(v: Vec<i32>, u: Vec<i32>) -> impl Iterator<Item = i32> {
// You could also write the following which is a lot more complicated.
// -> std::iter::Chain<std::vec::IntoIter<i32>, std::vec::IntoIter<i32>> {
v.into_iter().chain(u.into_iter())
}
/// More importantly, some Rust types can't be written out. For example, every
/// closure has its own unnamed concrete type. Before impl Trait syntax, you had
/// to allocate on the heap in order to return a closure. But now you can do it
/// all statically, like this:
pub fn make_adder(y: i32) -> impl Fn(i32) -> i32 {
move |x: i32| x + y
}
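// Sketch: the returned closure captures y by move; its concrete type is
// unnameable, but impl Fn(i32) -> i32 lets us return it without boxing.
pub fn adder_demo() {
    let add_five = make_adder(5);
    assert_eq!(add_five(2), 7);
}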
/// Polymorphism via trait objects
pub trait Draw {
fn draw(&self);
}
pub struct Screen {
pub components: Vec<Box<dyn Draw>>,
}
impl Default for Screen {
fn default() -> Self {
Screen { components: vec![] }
}
}
impl Screen {
pub fn run(&self) {
for component in self.components.iter() {
component.draw();
}
}
pub fn add_component(&mut self, draw: Box<dyn Draw>) -> &mut Self {
self.components.push(draw);
self
}
}
#[derive(Debug)]
pub struct Button {
pub width: u32,
pub height: u32,
pub label: String,
}
impl Draw for Button {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
#[derive(Debug)]
struct SelectBox {
width: u32,
height: u32,
options: Vec<String>,
}
impl Draw for SelectBox {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
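// Sketch: because components holds Box<dyn Draw> trait objects, one Vec can mix
// Buttons and SelectBoxes; each draw() call is dispatched through the vtable.
pub fn screen_demo() {
    let mut screen = Screen::default();
    screen
        .add_component(Box::new(Button {
            width: 10,
            height: 4,
            label: "OK".into(),
        }))
        .add_component(Box::new(SelectBox {
            width: 20,
            height: 8,
            options: vec!["Yes".into(), "No".into()],
        }));
    screen.run();
}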
/// Implement the "state pattern" via trait objects.
trait State {
// "self: Box<Self>" means that the method is only valid when called on a Box holding the type.
fn request_review(self: Box<Self>) -> Box<dyn State>;
fn approve(self: Box<Self>) -> Box<dyn State>;
fn reject(self: Box<Self>) -> Box<dyn State>;
fn content<'a>(&self, _post: &'a Post) -> &'a str {
""
}
}
struct Draft {}
/// Now we can start seeing the advantages of the state pattern: the request_review method on
/// Post is the same no matter its state value. Each state is responsible for its own rules.
impl State for Draft {
fn request_review(self: Box<Self>) -> Box<dyn State> {
Box::new(PendingReview {})
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
}
struct PendingReview {}
impl State for PendingReview {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
Box::new(Published {})
}
fn reject(self: Box<Self>) -> Box<dyn State> {
Box::new(Draft {})
    }
}
struct Published {}
impl State for Published {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
fn content<'a>(&self, post: &'a Post) -> &'a str {
post.content.as_ref()
}
}
pub struct Post {
/// To consume the old state, the request_review method needs to take ownership of the state
/// value. This is where the Option in the state field of Post comes in: we call the take
/// method to take the Some value out of the state field and leave a None in its place,
/// because Rust doesn’t let us have unpopulated fields in structs. This lets us move the
/// state value out of Post rather than borrowing it. Then we’ll set the post’s state value
/// to the result of this operation.
state: Option<Box<dyn State>>,
content: String,
}
/// Post knows nothing about the various behaviors. It relies on the various State objects to do
/// their jobs.
impl Default for Post {
fn default() -> Self {
Self {
state: Some(Box::new(Draft {})),
content: String::new(),
}
}
}
impl Post {
// This behavior doesn’t depend on the state the post is in, so it’s not part of the state
// pattern. The add_text method doesn’t interact with the state field at all, but it is part
// of the behavior we want to support.
pub fn add_text(&mut self, text: &str) {
self.content.push_str(text);
}
pub fn content(&self) -> &str {
match &self.state {
Some(s) => s.content(self),
None => "",
}
}
pub fn request_review(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.request_review());
}
}
pub fn reject(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.reject());
}
}
pub fn approve(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.approve());
}
}
}
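// Sketch of the full state flow (Draft -> PendingReview -> Published): content()
// returns the empty string until the post has been approved.
pub fn post_demo() {
    let mut post = Post::default();
    post.add_text("I ate a salad for lunch today");
    assert_eq!(post.content(), "");
    post.request_review();
    assert_eq!(post.content(), "");
    post.approve();
    assert_eq!(post.content(), "I ate a salad for lunch today");
}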
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_min() {
assert_eq!(min(3, 5), 3);
assert_eq!(min(30, 5), 5);
}
#[test]
fn traits_need_to_be_in_scope() {
// The Write trait needs to be in scope. Otherwise, all its methods (such as "write_all")
// are hidden.
use std::io::Write;
let mut buf: Vec<u8> = vec![];
buf.write_all(b"hello").unwrap();
assert_eq!(5, buf.len());
// Note that Write is implemented for byte sinks like Vec<u8>, but not for Vec<String>. So the code below doesn't work!
// let mut buf: Vec<String> = vec![];
// buf.write_all("hello world").unwrap();
// assert_eq!(11, buf.len());
}
#[test]
fn trait_objects() {
let mut buf: Vec<u8> = vec![];
// Rust doesn’t permit variables of type Write!
// This line doesn't compile because a variable's size has to be known at compile time and
// types that implement Write can be any size.
// let writer: Write = buf;
// A reference to a trait type, like writer, is called a "trait object". Like any other
// reference, a trait object points to some value, it has a lifetime, and it can be either
// mut or shared. The size of a reference is fixed!
let _writer: &mut dyn Write = &mut buf;
// What makes a trait object different is that Rust usually doesn’t know the type of the
// referent at compile time. So a trait object includes a little extra information about
// the referent’s type. This is strictly for Rust’s own use behind the scenes: when you
// call writer.write(data), Rust needs the type information to dynamically call the right
// write method depending on the type of *writer. You can’t query the type information
// directly, and Rust does not support downcasting from the trait object &mut Write back to
// a concrete type like Vec<u8>. In other words, you can only work with the "generic type"
// of the trait itself.
//
// In memory, a trait object is a "fat pointer" consisting of two pointers:
// 1. data pointer: a pointer to the value, plus
// 2. vtable pointer: a pointer to a table representing that value's type.
// (Vec<u8> in this example)
//
// A vtable is essentially a struct of function pointers, pointing to the concrete piece of
// machine code for each method in the implementation. A method call like
// trait_object.method() will retrieve the correct pointer out of the vtable and then do a
// dynamic call of it.
// Rust automatically converts ordinary references into trait objects when needed. Let's
// say "say_hello" is a function that takes a "&mut Write", this works:
//
// let mut local_file: File = File::create("hello.txt")?;
// say_hello(&mut local_file)?; // Rust converts "&mut File" to "&mut Write"
// This kind of conversion is the only way to create a trait object. What the computer is
// actually doing here is very simple. At the point where the conversion happens, Rust
// knows the referent’s true type (in this case, File), so it just adds the address of the
// appropriate "vtable", turning the regular pointer into a fat pointer.
}
#[test]
fn test_generic_functions() {}
#[test]
fn test_top_ten() {
let names = vec![
String::from("Oakland"),
String::from("Oakland"),
String::from("Oakland"),
String::from("Alameda"),
String::from("San Francisco"),
String::from("San Francisco"),
];
let top10 = top_ten(&names);
assert_eq!(vec!["Oakland", "San Francisco", "Alameda"], top10);
}
#[test]
fn test_sheep() {
let mut sheep = Sheep {
naked: true,
name: "Dolly",
};
sheep.talk();
sheep.shear();
sheep.talk();
}
#[test]
fn return_trait_object() {
let animal = random_animal(0.3);
assert_eq!("baaaaaa?", animal.noise());
}
}
// message.rs
//! Definitions of network messages.
use std::error::Error;
use std::{net, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::block::{Block, BlockHeader, BlockHeaderHash};
use zebra_chain::{transaction::Transaction, types::BlockHeight};
use super::inv::InventoryHash;
use super::types::*;
use crate::meta_addr::MetaAddr;
/// A Bitcoin-like network message for the Zcash protocol.
///
/// The Zcash network protocol is mostly inherited from Bitcoin, and a list of
/// Bitcoin network messages can be found [on the Bitcoin
/// wiki][btc_wiki_protocol].
///
/// That page describes the wire format of the messages, while this enum stores
/// an internal representation. The internal representation is unlinked from the
/// wire format, and the translation between the two happens only during
/// serialization and deserialization. For instance, Bitcoin identifies messages
/// by a 12-byte ascii command string; we consider this a serialization detail
/// and use the enum discriminant instead. (As a side benefit, this also means
/// that we have a clearly-defined validation boundary for network messages
/// during serialization).
///
/// [btc_wiki_protocol]: https://en.bitcoin.it/wiki/Protocol_documentation
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Message {
/// A `version` message.
///
/// Note that although this is called `version` in Bitcoin, its role is really
/// analogous to a `ClientHello` message in TLS, used to begin a handshake, and
/// is distinct from a simple version number.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#version)
Version {
/// The network version number supported by the sender.
version: Version,
/// The network services advertised by the sender.
services: PeerServices,
/// The time when the version message was sent.
timestamp: DateTime<Utc>,
/// The network address of the node receiving this message, and its
/// advertised network services.
///
/// Q: how does the handshake know the remote peer's services already?
address_recv: (PeerServices, net::SocketAddr),
/// The network address of the node sending this message, and its
/// advertised network services.
address_from: (PeerServices, net::SocketAddr),
/// Node random nonce, randomly generated every time a version
/// packet is sent. This nonce is used to detect connections
/// to self.
nonce: Nonce,
/// The Zcash user agent advertised by the sender.
user_agent: String,
/// The last block received by the emitting node.
start_height: BlockHeight,
/// Whether the remote peer should announce relayed
/// transactions or not, see [BIP 0037](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki)
relay: bool,
},
/// A `verack` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#verack)
Verack,
/// A `ping` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#ping)
Ping(
/// A nonce unique to this [`Ping`] message.
Nonce,
),
/// A `pong` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#pong)
Pong(
/// The nonce from the [`Ping`] message this was in response to.
Nonce,
),
/// A `reject` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
Reject {
/// Type of message rejected.
// It's unclear if this is strictly limited to message command
// codes, so leaving it a String.
message: String,
/// RejectReason code relating to rejected message.
ccode: RejectReason,
/// Human-readable version of rejection reason.
reason: String,
/// Optional extra data provided for some errors.
// Currently, all errors which provide this field fill it with
// the TXID or block header hash of the object being rejected,
// so the field is 32 bytes.
//
// Q: can we tell Rust that this field is optional? Or just
// default its value to an empty array, I guess.
data: Option<[u8; 32]>,
},
/// An `addr` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#addr)
Addr(Vec<MetaAddr>),
/// A `getaddr` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getaddr)
GetAddr,
/// A `block` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#block)
Block(Arc<Block>),
/// A `getblocks` message.
///
/// Requests the list of blocks starting right after the last
/// known hash in `block_locator_hashes`, up to `hash_stop` or 500
/// blocks, whichever comes first.
///
/// You can send in fewer known hashes down to a minimum of just
/// one hash. However, the purpose of the block locator object is
/// to detect a wrong branch in the caller's main chain. If the
/// peer detects that you are off the main chain, it will send in
/// block hashes which are earlier than your last known block. So
/// if you just send in your last known hash and it is off the
/// main chain, the peer starts over at block #1.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getblocks)
// The locator hashes are processed by a node in the order as they
// appear in the message. If a block hash is found in the node's
// main chain, the list of its children is returned back via the
// inv message and the remaining locators are ignored, no matter
// if the requested limit was reached, or not.
//
// The 500 headers number is from the Bitcoin docs, we are not
// certain (yet) that other implementations of Zcash obey this
// restriction, or if they don't, what happens if we send them too
// many results.
GetBlocks {
/// Block locators, from newest back to genesis block.
block_locator_hashes: Vec<BlockHeaderHash>,
/// `BlockHeaderHash` of the last desired block.
///
/// Set to zero to get as many blocks as possible (500).
hash_stop: BlockHeaderHash,
},
/// A `headers` message.
///
/// Returns block headers in response to a getheaders packet.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#headers)
// Note that the block headers in this packet include a
// transaction count (a var_int, so there can be more than 81
// bytes per header) as opposed to the block headers that are
// hashed by miners.
Headers(Vec<BlockHeader>),
/// A `getheaders` message.
///
/// Requests a series of block headers starting right after the
/// last known hash in `block_locator_hashes`, up to `hash_stop`
/// or 2000 blocks, whichever comes first.
///
/// You can send in fewer known hashes down to a minimum of just
/// one hash. However, the purpose of the block locator object is
/// to detect a wrong branch in the caller's main chain. If the
/// peer detects that you are off the main chain, it will send in
/// block hashes which are earlier than your last known block. So
/// if you just send in your last known hash and it is off the
/// main chain, the peer starts over at block #1.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getheaders)
// The 2000 headers number is from the Bitcoin docs, we are not
// certain (yet) that other implementations of Zcash obey this
// restriction, or if they don't, what happens if we send them too
// many results.
GetHeaders {
/// Block locators, from newest back to genesis block.
block_locator_hashes: Vec<BlockHeaderHash>,
/// `BlockHeaderHash` of the last desired block header.
///
/// Set to zero to get as many block headers as possible (2000).
hash_stop: BlockHeaderHash,
},
/// An `inv` message.
///
/// Allows a node to advertise its knowledge of one or more
/// objects. It can be received unsolicited, or in reply to
/// `getblocks`.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#inv)
Inv(Vec<InventoryHash>),
/// A `getdata` message.
///
/// `getdata` is used in response to `inv`, to retrieve the
/// content of a specific object, and is usually sent after
/// receiving an `inv` packet, after filtering known elements.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getdata)
GetData(Vec<InventoryHash>),
/// A `notfound` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#notfound)
// See note above on `Inventory`.
NotFound(Vec<InventoryHash>),
/// A `tx` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#tx)
Tx(Arc<Transaction>),
/// A `mempool` message.
///
/// This was defined in [BIP35], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#mempool)
/// [BIP35]: https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki
Mempool,
/// A `filterload` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterLoad {
/// The filter itself is simply a bit field of arbitrary
/// byte-aligned size. The maximum size is 36,000 bytes.
filter: Filter,
/// The number of hash functions to use in this filter. The
/// maximum value allowed in this field is 50.
hash_functions_count: u32,
/// A random value to add to the seed value in the hash
/// function used by the bloom filter.
tweak: Tweak,
/// A set of flags that control how matched items are added to the filter.
flags: u8,
},
/// A `filteradd` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterAdd {
/// The data element to add to the current filter.
// The data field must be smaller than or equal to 520 bytes
// in size (the maximum size of any potentially matched
// object).
//
// A Vec instead of [u8; 520] because of needed traits.
data: Vec<u8>,
},
/// A `filterclear` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterClear,
}
impl<E> From<E> for Message
where
E: Error,
{
    fn from(e: E) -> Self {
Message::Reject {
message: e.to_string(),
// The generic case, impls for specific error types should
// use specific varieties of `RejectReason`.
ccode: RejectReason::Other,
            // Fall back to the error's own message when it has no source;
            // calling unwrap() here would panic on source-less errors.
            reason: e
                .source()
                .map(ToString::to_string)
                .unwrap_or_else(|| e.to_string()),
// Allow this to be overridden but not populated by default, methinks.
data: None,
}
}
}
/// Reject Reason CCodes
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
#[allow(missing_docs)]
pub enum RejectReason {
Malformed = 0x01,
Invalid = 0x10,
Obsolete = 0x11,
Duplicate = 0x12,
Nonstandard = 0x40,
Dust = 0x41,
InsufficientFee = 0x42,
Checkpoint = 0x43,
Other = 0x50,
}
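// A small sketch (not in the original file) exercising the blanket From<E>
// impl above: any std error converts into a Reject with the generic ccode.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn error_converts_to_reject() {
        let err = std::io::Error::new(std::io::ErrorKind::Other, "boom");
        match Message::from(err) {
            Message::Reject { ccode, .. } => assert_eq!(ccode, RejectReason::Other),
            other => panic!("expected Reject, got {:?}", other),
        }
    }
}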
// main.rs
use random_fast_rng::{FastRng, Random};
use rusqlite::{params, Connection, DropBehavior};
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
const ITER_SECS: u64 = 5;
const USE_RWLOCK: bool = false;
const SEED_COUNT: usize = 20;
const NEW_ITEM_SIZE: usize = 40 * 1024;
const PRINT_VALUES: bool = false;
/// SQLite's approach to concurrency requires waiting/backing off in case of
/// readers/writers conflict. This sets a max duration before failing.
const DB_TIMEOUT: Duration = Duration::from_secs(6);
struct Database {
conn: rusqlite::Connection,
}
#[derive(Copy, Clone, Debug)]
struct DbOptions {
wal: bool,
shared_cache: bool,
}
impl DbOptions {
fn db_flags(&self) -> rusqlite::OpenFlags {
use rusqlite::OpenFlags;
let mut flags = OpenFlags::empty();
flags.set(OpenFlags::SQLITE_OPEN_CREATE, true);
flags.set(OpenFlags::SQLITE_OPEN_READ_WRITE, true);
flags.set(OpenFlags::SQLITE_OPEN_SHARED_CACHE, self.shared_cache);
flags
}
}
impl Database {
pub fn create<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let path: &Path = path.as_ref();
if path.exists() {
fs::remove_file(path).expect("Could not delete existing database file");
}
let mut db = Self::open(path, options);
db.create_tables(options);
db
}
pub fn open<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let conn = Connection::open_with_flags(path, options.db_flags())
.expect("Could not create SQLite connection");
conn.busy_timeout(DB_TIMEOUT)
.expect("Error setting the database timeout");
Database { conn }
}
fn create_tables(&mut self, options: &DbOptions) {
if options.wal {
self.conn
.pragma_update(None, "journal_mode", &"WAL".to_owned())
.expect("Error applying WAL journal_mode");
}
self.conn
.execute(
r#"
CREATE TABLE "kv" (
"key" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY("key")
) WITHOUT ROWID;
"#,
[],
)
.expect("Error creating tables");
}
pub fn seed(&mut self) -> std::io::Result<Vec<u16>> {
let mut transaction = self
.conn
.transaction()
.expect("Could not open DB transaction");
transaction.set_drop_behavior(DropBehavior::Commit);
let mut query = transaction
.prepare(
r#"
INSERT INTO "kv" VALUES (?1,?2);
"#,
)
.expect("Failed to prepare insert query");
let mut keys = Vec::new();
let mut rng = FastRng::new();
for _ in 0..SEED_COUNT {
let (key, value) = (rng.get_u16(), rng.get_u16());
keys.push(key);
query
.execute(params![key, value])
.expect("Insertion failure seeding database!");
}
Ok(keys)
}
}
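// Sketch (not part of the original benchmark): typical setup, creating a fresh
// WAL-mode database and seeding it; "demo.db" is an arbitrary example path.
#[allow(dead_code)]
fn setup_demo() -> Vec<u16> {
    let mut db = Database::create(
        "demo.db",
        &DbOptions {
            wal: true,
            shared_cache: false,
        },
    );
    db.seed().expect("Error seeding database!")
}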
fn read_loop(
db: Database,
keys: &[u16],
stop: Arc<AtomicBool>,
rwlock: Arc<RwLock<()>>,
) -> (i32, Vec<i64>) {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
SELECT "value" FROM "kv"
WHERE "key" =?1
LIMIT 1;"#,
)
.expect("Failed to prepare query statement");
let mut reads = 0;
let mut rng = FastRng::new();
    while !stop.load(Ordering::Relaxed) {
let key_index = rng.get_usize() % keys.len();
let key = &keys[key_index as usize];
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
            _guard = rwlock.read().expect("Cannot acquire read lock!");
}
let value: Result<String, _> = query.query_row(&[key], |result| result.get(0));
reads += 1;
let elapsed = timer.elapsed();
if PRINT_VALUES {
if let Ok(value) = value {
println!("{}: {}", key, value);
}
}
times.push(elapsed.as_nanos() as i64);
}
(reads, times)
}
fn write_loop(db: Database, stop: Arc<AtomicBool>, rwlock: Arc<RwLock<()>>) -> Vec<i64> {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
INSERT OR IGNORE INTO "kv" ("key", "value")
VALUES (?1,?2)
"#,
)
.expect("Failed to prepare update statement");
let mut rng = FastRng::new();
let mut value = Vec::new();
value.resize(NEW_ITEM_SIZE, 0u8);
rng.fill_bytes(&mut value);
    while !stop.load(Ordering::Relaxed) {
let key = rng.get_u16();
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
            _guard = rwlock.write().expect("Cannot acquire write lock!");
}
let rows_updated = query
.execute(params![key, value])
.expect("Failed to issue update query!");
let elapsed = timer.elapsed();
if PRINT_VALUES && rows_updated > 0 {
println!("{} set", key);
}
times.push(elapsed.as_nanos() as i64);
}
times
}
fn average(nums: &[i64]) -> f64 {
let sum: i128 = nums.iter().map(|n| *n as i128).sum();
sum as f64 / (nums.len() as f64)
}
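// Sketch: the p95/p99/p99.9 lookups in main() below all follow the same
// index-into-sorted-times pattern; this hypothetical helper makes the intent
// explicit. The input must be sorted ascending and non-empty.
#[allow(dead_code)]
fn percentile_ms(sorted_nanos: &[i64], p: f64) -> f64 {
    let idx = ((p * sorted_nanos.len() as f64) as usize).min(sorted_nanos.len() - 1);
    sorted_nanos[idx] as f64 / 1_000_000f64
}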
struct PerfRecord {
config: String,
readers: i32,
writers: i32,
reads_per_sec: f64,
writes_per_sec: f64,
read_p95: f64,
read_p99: f64,
read_p999: f64,
write_p95: Option<f64>,
write_p99: Option<f64>,
write_p999: Option<f64>,
}
fn main() {
let mut perf_vec = Vec::new();
for options in [
DbOptions { shared_cache: false, wal: false },
DbOptions { shared_cache: false, wal: true },
// Shared cache w/out wal requires unlock_notify to work
DbOptions { shared_cache: true, wal: false },
DbOptions { shared_cache: true, wal: true },
] {
println!("## {:?}", options);
println!("");
let keys = {
let mut db = Database::create("test.db", &options);
db.seed().expect("Error seeding database!")
};
for writers in 0..4 {
let done = Arc::new(AtomicBool::new(false));
let rwlock = Arc::new(RwLock::new(()));
let options = Arc::new(options);
{
let done = done.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(ITER_SECS));
done.store(true, Ordering::Release);
});
}
let db = Database::open("test.db", &options);
let (write_counts_send, write_counts_recv) = mpsc::channel();
for _ in 0..writers {
let done = done.clone();
let sender = write_counts_send.clone();
let rwlock = rwlock.clone();
let options = options.clone();
thread::spawn(move || {
let write_db = Database::open("test.db", &options);
let write_times = write_loop(write_db, done, rwlock);
sender
.send(write_times)
.expect("Could not send write count!");
});
}
drop(write_counts_send);
let (total_reads, mut read_times) = read_loop(db, &keys, done.clone(), rwlock.clone());
read_times.sort();
let mut total_writes = 0;
let mut write_times = Vec::new();
for _ in 0..writers {
let mut writes = write_counts_recv
.recv()
.expect("Failed to receive write counts!");
total_writes += writes.len();
write_times.append(&mut writes);
}
write_times.sort();
println!("{} writers:", writers);
println!("- Read {} values from the database.", read_times.len());
println!("- Wrote {} values to the database.", total_writes);
println!(
"- Mean read time: {:.5} ms",
average(&read_times) / 1000_000f64
);
let p95_nanos = read_times[(0.95 * (read_times.len() as f64)) as usize];
let p95_millis = p95_nanos as f64 / 1000_000f64;
println!("- P95: {} ms", p95_millis);
let p99_nanos = read_times[(0.99 * (read_times.len() as f64)) as usize];
let p99_millis = p99_nanos as f64 / 1000_000f64;
println!("- P99: {} ms", p99_millis);
let p99_9_nanos = read_times[(0.999 * (read_times.len() as f64)) as usize];
let p99_9_millis = p99_9_nanos as f64 / 1000_000f64;
println!("- P99.9: {} ms", p99_9_millis);
println!("");
fn not_str(v: bool) -> &'static str {
        if v { "" } else { "!" }
}
perf_vec.push(PerfRecord {
config: format!("{}wal, {}shared_cache", not_str(options.wal), not_str(options.shared_cache)),
readers: 1,
writers,
reads_per_sec: total_reads as f64 / ITER_SECS as f64,
writes_per_sec: total_writes as f64 / ITER_SECS as f64,
read_p95: p95_millis,
read_p99: p99_millis,
read_p999: p99_9_millis,
write_p95: if write_times.len() > 0 { Some(write_times[(0.95 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p99: if write_times.len() > 0 { Some(write_times[(0.99 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p999: if write_times.len() > 0 { Some(write_times[(0.999 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
});
}
}
fn print_or<T: std::fmt::Display>(v: Option<T>, o: &str) -> String {
v.map(|v| v.to_string())
.unwrap_or(o.to_owned())
}
let title_width = perf_vec.iter().map(|r| r.config.len()).max().unwrap();
println!("---------------------------------");
println!("");
println!("| configuration | readers | writers | reads/sec | writes/sec | read p95 (ms) | read p99 | read p99.9 | write p95 | write p99 | write p99.9 |");
println!("| ------------- | ------- | ------- | --------- | ---------- | ------------- | -------- | ---------- | --------- | --------- | ----------- |");
for row in perf_vec {
println!("| {:w0$} | {:2} | {:2} | {} | {} | {} | {} | {} | {} | {} | {} |",
row.config, row.readers, row.writers, row.reads_per_sec, row.writes_per_sec,
row.read_p95, row.read_p99, row.read_p999,
print_or(row.write_p95, "N/A"), print_or(row.write_p99, "N/A"), print_or(row.write_p999, "N/A"),
w0 = title_width,
);
}
}
| { "" } | conditional_block |
main.rs | use random_fast_rng::{FastRng, Random};
use rusqlite::{params, Connection, DropBehavior};
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
const ITER_SECS: u64 = 5;
const USE_RWLOCK: bool = false;
const SEED_COUNT: usize = 20;
const NEW_ITEM_SIZE: usize = 40 * 1024;
const PRINT_VALUES: bool = false;
/// SQLite's approach to concurrency requires waiting/backing off in case of
/// readers/writers conflict. This sets a max duration before failing.
const DB_TIMEOUT: Duration = Duration::from_secs(6);
struct Database {
conn: rusqlite::Connection,
}
#[derive(Copy, Clone, Debug)]
struct DbOptions {
wal: bool,
shared_cache: bool,
}
impl DbOptions {
fn db_flags(&self) -> rusqlite::OpenFlags {
use rusqlite::OpenFlags;
let mut flags = OpenFlags::empty();
flags.set(OpenFlags::SQLITE_OPEN_CREATE, true);
flags.set(OpenFlags::SQLITE_OPEN_READ_WRITE, true);
flags.set(OpenFlags::SQLITE_OPEN_SHARED_CACHE, self.shared_cache);
flags
}
}
impl Database {
pub fn create<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let path: &Path = path.as_ref();
if path.exists() {
fs::remove_file(path).expect("Could not delete existing database file");
}
let mut db = Self::open(path, options);
db.create_tables(options);
db
}
pub fn open<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let conn = Connection::open_with_flags(path, options.db_flags())
.expect("Could not create SQLite connection");
conn.busy_timeout(DB_TIMEOUT)
.expect("Error setting the database timeout");
Database { conn }
}
fn create_tables(&mut self, options: &DbOptions) |
pub fn seed(&mut self) -> std::io::Result<Vec<u16>> {
let mut transaction = self
.conn
.transaction()
.expect("Could not open DB transaction");
transaction.set_drop_behavior(DropBehavior::Commit);
let mut query = transaction
.prepare(
r#"
INSERT INTO "kv" VALUES (?1,?2);
"#,
)
.expect("Failed to prepare insert query");
let mut keys = Vec::new();
let mut rng = FastRng::new();
for k in &mut keys {
*k = rng.get_u16();
}
for _ in 0..SEED_COUNT {
let (key, value) = (rng.get_u16(), rng.get_u16());
keys.push(key);
query
.execute(params![key, value])
.expect("Insertion failure seeding database!");
}
Ok(keys)
}
}
fn read_loop(
db: Database,
keys: &[u16],
stop: Arc<AtomicBool>,
rwlock: Arc<RwLock<()>>,
) -> (i32, Vec<i64>) {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
SELECT "value" FROM "kv"
WHERE "key" =?1
LIMIT 1;"#,
)
.expect("Failed to prepare query statement");
let mut reads = 0;
let mut rng = FastRng::new();
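// Read random seeded keys until the watchdog flips `stop`, recording per-query latency in nanoseconds.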
while !stop.load(Ordering::Relaxed) {
let key_index = rng.get_usize() % keys.len();
let key = &keys[key_index as usize];
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.read().expect("Cannot unlock for read!");
}
let value: Result<String, _> = query.query_row(&[key], |result| result.get(0));
reads += 1;
let elapsed = timer.elapsed();
if PRINT_VALUES {
if let Ok(value) = value {
println!("{}: {}", key, value);
}
}
times.push(elapsed.as_nanos() as i64);
}
(reads, times)
}
fn write_loop(db: Database, stop: Arc<AtomicBool>, rwlock: Arc<RwLock<()>>) -> Vec<i64> {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
INSERT OR IGNORE INTO "kv" ("key", "value")
VALUES (?1,?2)
"#,
)
.expect("Failed to prepare update statement");
let mut rng = FastRng::new();
let mut value = Vec::new();
value.resize(NEW_ITEM_SIZE, 0u8);
rng.fill_bytes(&mut value);
while !stop.load(Ordering::Relaxed) {
let key = rng.get_u16();
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.write().expect("Cannot unlock for write!");
}
let rows_updated = query
.execute(params![key, value])
.expect("Failed to issue update query!");
let elapsed = timer.elapsed();
if PRINT_VALUES && rows_updated > 0 {
println!("{} set", key);
}
times.push(elapsed.as_nanos() as i64);
}
times
}
fn average(nums: &[i64]) -> f64 {
let sum: i128 = nums.iter().map(|n| *n as i128).sum();
sum as f64 / (nums.len() as f64)
}
struct PerfRecord {
config: String,
readers: i32,
writers: i32,
reads_per_sec: f64,
writes_per_sec: f64,
read_p95: f64,
read_p99: f64,
read_p999: f64,
write_p95: Option<f64>,
write_p99: Option<f64>,
write_p999: Option<f64>,
}
fn main() {
let mut perf_vec = Vec::new();
for options in [
DbOptions { shared_cache: false, wal: false },
DbOptions { shared_cache: false, wal: true },
// Shared cache w/out wal requires unlock_notify to work
DbOptions { shared_cache: true, wal: false },
DbOptions { shared_cache: true, wal: true },
] {
println!("## {:?}", options);
println!("");
let keys = {
let mut db = Database::create("test.db", &options);
db.seed().expect("Error seeding database!")
};
for writers in 0..4 {
let done = Arc::new(AtomicBool::new(false));
let rwlock = Arc::new(RwLock::new(()));
let options = Arc::new(options);
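// Watchdog thread: flip the stop flag after ITER_SECS so the reader and all writers wind down together.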
{
let done = done.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(ITER_SECS));
done.store(true, Ordering::Release);
});
}
let db = Database::open("test.db", &options);
let (write_counts_send, write_counts_recv) = mpsc::channel();
for _ in 0..writers {
let done = done.clone();
let sender = write_counts_send.clone();
let rwlock = rwlock.clone();
let options = options.clone();
thread::spawn(move || {
let write_db = Database::open("test.db", &options);
let write_times = write_loop(write_db, done, rwlock);
sender
.send(write_times)
.expect("Could not send write count!");
});
}
drop(write_counts_send);
let (total_reads, mut read_times) = read_loop(db, &keys, done.clone(), rwlock.clone());
read_times.sort();
let mut total_writes = 0;
let mut write_times = Vec::new();
for _ in 0..writers {
let mut writes = write_counts_recv
.recv()
.expect("Failed to receive write counts!");
total_writes += writes.len();
write_times.append(&mut writes);
}
write_times.sort();
println!("{} writers:", writers);
println!("- Read {} values from the database.", read_times.len());
println!("- Wrote {} values to the database.", total_writes);
println!(
"- Mean read time: {:.5} ms",
average(&read_times) / 1000_000f64
);
let p95_nanos = read_times[(0.95 * (read_times.len() as f64)) as usize];
let p95_millis = p95_nanos as f64 / 1000_000f64;
println!("- P95: {} ms", p95_millis);
let p99_nanos = read_times[(0.99 * (read_times.len() as f64)) as usize];
let p99_millis = p99_nanos as f64 / 1000_000f64;
println!("- P99: {} ms", p99_millis);
let p99_9_nanos = read_times[(0.999 * (read_times.len() as f64)) as usize];
let p99_9_millis = p99_9_nanos as f64 / 1000_000f64;
println!("- P99.9: {} ms", p99_9_millis);
println!("");
fn not_str(v: bool) -> &'static str {
if v { "" } else { "!" }
}
perf_vec.push(PerfRecord {
config: format!("{}wal, {}shared_cache", not_str(options.wal), not_str(options.shared_cache)),
readers: 1,
writers,
reads_per_sec: total_reads as f64 / ITER_SECS as f64,
writes_per_sec: total_writes as f64 / ITER_SECS as f64,
read_p95: p95_millis,
read_p99: p99_millis,
read_p999: p99_9_millis,
write_p95: if write_times.len() > 0 { Some(write_times[(0.95 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p99: if write_times.len() > 0 { Some(write_times[(0.99 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p999: if write_times.len() > 0 { Some(write_times[(0.999 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
});
}
}
fn print_or<T: std::fmt::Display>(v: Option<T>, o: &str) -> String {
v.map(|v| v.to_string())
.unwrap_or(o.to_owned())
}
let title_width = perf_vec.iter().map(|r| r.config.len()).max().unwrap();
println!("---------------------------------");
println!("");
println!("| configuration | readers | writers | reads/sec | writes/sec | read p95 (ms) | read p99 | read p99.9 | write p95 | write p99 | write p99.9 |");
println!("| ------------- | ------- | ------- | --------- | ---------- | ------------- | -------- | ---------- | --------- | --------- | ----------- |");
for row in perf_vec {
println!("| {:w0$} | {:2} | {:2} | {} | {} | {} | {} | {} | {} | {} | {} |",
row.config, row.readers, row.writers, row.reads_per_sec, row.writes_per_sec,
row.read_p95, row.read_p99, row.read_p999,
print_or(row.write_p95, "N/A"), print_or(row.write_p99, "N/A"), print_or(row.write_p999, "N/A"),
w0 = title_width,
);
}
}
| {
if options.wal {
self.conn
.pragma_update(None, "journal_mode", &"WAL".to_owned())
.expect("Error applying WAL journal_mode");
}
self.conn
.execute(
r#"
CREATE TABLE "kv" (
"key" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY("key")
) WITHOUT ROWID;
"#,
[],
)
.expect("Error creating tables");
} | identifier_body |
main.rs | use random_fast_rng::{FastRng, Random};
use rusqlite::{params, Connection, DropBehavior};
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
const ITER_SECS: u64 = 5;
const USE_RWLOCK: bool = false;
const SEED_COUNT: usize = 20;
const NEW_ITEM_SIZE: usize = 40 * 1024;
const PRINT_VALUES: bool = false;
/// SQLite's approach to concurrency requires waiting/backing off in case of
/// readers/writers conflict. This sets a max duration before failing.
const DB_TIMEOUT: Duration = Duration::from_secs(6);
struct | {
conn: rusqlite::Connection,
}
#[derive(Copy, Clone, Debug)]
struct DbOptions {
wal: bool,
shared_cache: bool,
}
impl DbOptions {
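/// Translates the options into SQLite open flags; only shared-cache is toggled here, WAL is a PRAGMA applied in create_tables.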
fn db_flags(&self) -> rusqlite::OpenFlags {
use rusqlite::OpenFlags;
let mut flags = OpenFlags::empty();
flags.set(OpenFlags::SQLITE_OPEN_CREATE, true);
flags.set(OpenFlags::SQLITE_OPEN_READ_WRITE, true);
flags.set(OpenFlags::SQLITE_OPEN_SHARED_CACHE, self.shared_cache);
flags
}
}
impl Database {
pub fn create<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let path: &Path = path.as_ref();
if path.exists() {
fs::remove_file(path).expect("Could not delete existing database file");
}
let mut db = Self::open(path, options);
db.create_tables(options);
db
}
pub fn open<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let conn = Connection::open_with_flags(path, options.db_flags())
.expect("Could not create SQLite connection");
conn.busy_timeout(DB_TIMEOUT)
.expect("Error setting the database timeout");
Database { conn }
}
fn create_tables(&mut self, options: &DbOptions) {
if options.wal {
self.conn
.pragma_update(None, "journal_mode", &"WAL".to_owned())
.expect("Error applying WAL journal_mode");
}
self.conn
.execute(
r#"
CREATE TABLE "kv" (
"key" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY("key")
) WITHOUT ROWID;
"#,
[],
)
.expect("Error creating tables");
}
pub fn seed(&mut self) -> std::io::Result<Vec<u16>> {
let mut transaction = self
.conn
.transaction()
.expect("Could not open DB transaction");
transaction.set_drop_behavior(DropBehavior::Commit);
let mut query = transaction
.prepare(
r#"
INSERT INTO "kv" VALUES (?1,?2);
"#,
)
.expect("Failed to prepare insert query");
let mut keys = Vec::new();
let mut rng = FastRng::new();
for k in &mut keys {
*k = rng.get_u16();
}
for _ in 0..SEED_COUNT {
let (key, value) = (rng.get_u16(), rng.get_u16());
keys.push(key);
query
.execute(params![key, value])
.expect("Insertion failure seeding database!");
}
Ok(keys)
}
}
fn read_loop(
db: Database,
keys: &[u16],
stop: Arc<AtomicBool>,
rwlock: Arc<RwLock<()>>,
) -> (i32, Vec<i64>) {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
SELECT "value" FROM "kv"
WHERE "key" =?1
LIMIT 1;"#,
)
.expect("Failed to prepare query statement");
let mut reads = 0;
let mut rng = FastRng::new();
while !stop.load(Ordering::Relaxed) {
let key_index = rng.get_usize() % keys.len();
let key = &keys[key_index as usize];
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.read().expect("Cannot unlock for read!");
}
let value: Result<String, _> = query.query_row(&[key], |result| result.get(0));
reads += 1;
let elapsed = timer.elapsed();
if PRINT_VALUES {
if let Ok(value) = value {
println!("{}: {}", key, value);
}
}
times.push(elapsed.as_nanos() as i64);
}
(reads, times)
}
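// Writer loop: INSERT OR IGNORE a 40 KiB blob under a random u16 key until stopped; key collisions are cheap no-ops.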
fn write_loop(db: Database, stop: Arc<AtomicBool>, rwlock: Arc<RwLock<()>>) -> Vec<i64> {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
INSERT OR IGNORE INTO "kv" ("key", "value")
VALUES (?1,?2)
"#,
)
.expect("Failed to prepare update statement");
let mut rng = FastRng::new();
let mut value = Vec::new();
value.resize(NEW_ITEM_SIZE, 0u8);
rng.fill_bytes(&mut value);
while !stop.load(Ordering::Relaxed) {
let key = rng.get_u16();
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.write().expect("Cannot unlock for write!");
}
let rows_updated = query
.execute(params![key, value])
.expect("Failed to issue update query!");
let elapsed = timer.elapsed();
if PRINT_VALUES && rows_updated > 0 {
println!("{} set", key);
}
times.push(elapsed.as_nanos() as i64);
}
times
}
fn average(nums: &[i64]) -> f64 {
let sum: i128 = nums.iter().map(|n| *n as i128).sum();
sum as f64 / (nums.len() as f64)
}
struct PerfRecord {
config: String,
readers: i32,
writers: i32,
reads_per_sec: f64,
writes_per_sec: f64,
read_p95: f64,
read_p99: f64,
read_p999: f64,
write_p95: Option<f64>,
write_p99: Option<f64>,
write_p999: Option<f64>,
}
fn main() {
let mut perf_vec = Vec::new();
for options in [
DbOptions { shared_cache: false, wal: false },
DbOptions { shared_cache: false, wal: true },
// Shared cache w/out wal requires unlock_notify to work
DbOptions { shared_cache: true, wal: false },
DbOptions { shared_cache: true, wal: true },
] {
println!("## {:?}", options);
println!("");
let keys = {
let mut db = Database::create("test.db", &options);
db.seed().expect("Error seeding database!")
};
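// Sweep 0..=3 concurrent writer threads against the single reader running on this thread.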
for writers in 0..4 {
let done = Arc::new(AtomicBool::new(false));
let rwlock = Arc::new(RwLock::new(()));
let options = Arc::new(options);
{
let done = done.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(ITER_SECS));
done.store(true, Ordering::Release);
});
}
let db = Database::open("test.db", &options);
let (write_counts_send, write_counts_recv) = mpsc::channel();
for _ in 0..writers {
let done = done.clone();
let sender = write_counts_send.clone();
let rwlock = rwlock.clone();
let options = options.clone();
thread::spawn(move || {
let write_db = Database::open("test.db", &options);
let write_times = write_loop(write_db, done, rwlock);
sender
.send(write_times)
.expect("Could not send write count!");
});
}
drop(write_counts_send);
let (total_reads, mut read_times) = read_loop(db, &keys, done.clone(), rwlock.clone());
read_times.sort();
let mut total_writes = 0;
let mut write_times = Vec::new();
for _ in 0..writers {
let mut writes = write_counts_recv
.recv()
.expect("Failed to receive write counts!");
total_writes += writes.len();
write_times.append(&mut writes);
}
write_times.sort();
println!("{} writers:", writers);
println!("- Read {} values from the database.", read_times.len());
println!("- Wrote {} values to the database.", total_writes);
println!(
"- Mean read time: {:.5} ms",
average(&read_times) / 1000_000f64
);
let p95_nanos = read_times[(0.95 * (read_times.len() as f64)) as usize];
let p95_millis = p95_nanos as f64 / 1000_000f64;
println!("- P95: {} ms", p95_millis);
let p99_nanos = read_times[(0.99 * (read_times.len() as f64)) as usize];
let p99_millis = p99_nanos as f64 / 1000_000f64;
println!("- P99: {} ms", p99_millis);
let p99_9_nanos = read_times[(0.999 * (read_times.len() as f64)) as usize];
let p99_9_millis = p99_9_nanos as f64 / 1000_000f64;
println!("- P99.9: {} ms", p99_9_millis);
println!("");
fn not_str(v: bool) -> &'static str {
if v { "" } else { "!" }
}
perf_vec.push(PerfRecord {
config: format!("{}wal, {}shared_cache", not_str(options.wal), not_str(options.shared_cache)),
readers: 1,
writers,
reads_per_sec: total_reads as f64 / ITER_SECS as f64,
writes_per_sec: total_writes as f64 / ITER_SECS as f64,
read_p95: p95_millis,
read_p99: p99_millis,
read_p999: p99_9_millis,
write_p95: if write_times.len() > 0 { Some(write_times[(0.95 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p99: if write_times.len() > 0 { Some(write_times[(0.99 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p999: if write_times.len() > 0 { Some(write_times[(0.999 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
});
}
}
fn print_or<T: std::fmt::Display>(v: Option<T>, o: &str) -> String {
v.map(|v| v.to_string())
.unwrap_or(o.to_owned())
}
let title_width = perf_vec.iter().map(|r| r.config.len()).max().unwrap();
println!("---------------------------------");
println!("");
println!("| configuration | readers | writers | reads/sec | writes/sec | read p95 (ms) | read p99 | read p99.9 | write p95 | write p99 | write p99.9 |");
println!("| ------------- | ------- | ------- | --------- | ---------- | ------------- | -------- | ---------- | --------- | --------- | ----------- |");
for row in perf_vec {
println!("| {:w0$} | {:2} | {:2} | {} | {} | {} | {} | {} | {} | {} | {} |",
row.config, row.readers, row.writers, row.reads_per_sec, row.writes_per_sec,
row.read_p95, row.read_p99, row.read_p999,
print_or(row.write_p95, "N/A"), print_or(row.write_p99, "N/A"), print_or(row.write_p999, "N/A"),
w0 = title_width,
);
}
}
| Database | identifier_name |
main.rs | use random_fast_rng::{FastRng, Random};
use rusqlite::{params, Connection, DropBehavior};
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
const ITER_SECS: u64 = 5;
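// Optional app-level RwLock around every query, for comparing against SQLite's own locking.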
const USE_RWLOCK: bool = false;
const SEED_COUNT: usize = 20;
const NEW_ITEM_SIZE: usize = 40 * 1024;
const PRINT_VALUES: bool = false;
/// SQLite's approach to concurrency requires waiting/backing off in case of
/// readers/writers conflict. This sets a max duration before failing.
const DB_TIMEOUT: Duration = Duration::from_secs(6);
struct Database {
conn: rusqlite::Connection,
}
#[derive(Copy, Clone, Debug)]
struct DbOptions {
wal: bool,
shared_cache: bool,
}
impl DbOptions {
fn db_flags(&self) -> rusqlite::OpenFlags {
use rusqlite::OpenFlags;
let mut flags = OpenFlags::empty();
flags.set(OpenFlags::SQLITE_OPEN_CREATE, true);
flags.set(OpenFlags::SQLITE_OPEN_READ_WRITE, true);
flags.set(OpenFlags::SQLITE_OPEN_SHARED_CACHE, self.shared_cache);
flags
}
}
impl Database {
pub fn create<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let path: &Path = path.as_ref();
if path.exists() {
fs::remove_file(path).expect("Could not delete existing database file");
}
let mut db = Self::open(path, options);
db.create_tables(options);
db
}
pub fn open<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let conn = Connection::open_with_flags(path, options.db_flags())
.expect("Could not create SQLite connection");
conn.busy_timeout(DB_TIMEOUT)
.expect("Error setting the database timeout");
Database { conn }
}
fn create_tables(&mut self, options: &DbOptions) {
if options.wal {
self.conn
.pragma_update(None, "journal_mode", &"WAL".to_owned())
.expect("Error applying WAL journal_mode");
}
self.conn
.execute(
r#"
CREATE TABLE "kv" (
"key" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY("key")
) WITHOUT ROWID;
"#,
[],
)
.expect("Error creating tables");
}
pub fn seed(&mut self) -> std::io::Result<Vec<u16>> {
let mut transaction = self
.conn
.transaction()
.expect("Could not open DB transaction");
transaction.set_drop_behavior(DropBehavior::Commit);
let mut query = transaction
.prepare(
r#"
INSERT INTO "kv" VALUES (?1,?2);
"#,
)
.expect("Failed to prepare insert query");
let mut keys = Vec::new();
let mut rng = FastRng::new();
for k in &mut keys {
*k = rng.get_u16();
}
for _ in 0..SEED_COUNT {
let (key, value) = (rng.get_u16(), rng.get_u16());
keys.push(key);
query
.execute(params![key, value])
.expect("Insertion failure seeding database!");
}
Ok(keys)
}
}
fn read_loop(
db: Database,
keys: &[u16],
stop: Arc<AtomicBool>,
rwlock: Arc<RwLock<()>>,
) -> (i32, Vec<i64>) {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
SELECT "value" FROM "kv"
WHERE "key" =?1
LIMIT 1;"#,
)
.expect("Failed to prepare query statement");
let mut reads = 0;
let mut rng = FastRng::new();
while !stop.load(Ordering::Relaxed) {
let key_index = rng.get_usize() % keys.len();
let key = &keys[key_index as usize];
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.read().expect("Cannot unlock for read!");
}
let value: Result<String, _> = query.query_row(&[key], |result| result.get(0));
reads += 1;
let elapsed = timer.elapsed();
if PRINT_VALUES {
if let Ok(value) = value {
println!("{}: {}", key, value);
}
}
times.push(elapsed.as_nanos() as i64);
}
(reads, times)
}
fn write_loop(db: Database, stop: Arc<AtomicBool>, rwlock: Arc<RwLock<()>>) -> Vec<i64> {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
INSERT OR IGNORE INTO "kv" ("key", "value")
VALUES (?1,?2)
"#,
)
.expect("Failed to prepare update statement");
let mut rng = FastRng::new();
let mut value = Vec::new();
value.resize(NEW_ITEM_SIZE, 0u8); | let key = rng.get_u16();
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.write().expect("Cannot unlock for write!");
}
let rows_updated = query
.execute(params![key, value])
.expect("Failed to issue update query!");
let elapsed = timer.elapsed();
if PRINT_VALUES && rows_updated > 0 {
println!("{} set", key);
}
times.push(elapsed.as_nanos() as i64);
}
times
}
fn average(nums: &[i64]) -> f64 {
let sum: i128 = nums.iter().map(|n| *n as i128).sum();
sum as f64 / (nums.len() as f64)
}
struct PerfRecord {
config: String,
readers: i32,
writers: i32,
reads_per_sec: f64,
writes_per_sec: f64,
read_p95: f64,
read_p99: f64,
read_p999: f64,
write_p95: Option<f64>,
write_p99: Option<f64>,
write_p999: Option<f64>,
}
fn main() {
let mut perf_vec = Vec::new();
for options in [
DbOptions { shared_cache: false, wal: false },
DbOptions { shared_cache: false, wal: true },
// Shared cache w/out wal requires unlock_notify to work
DbOptions { shared_cache: true, wal: false },
DbOptions { shared_cache: true, wal: true },
] {
println!("## {:?}", options);
println!("");
let keys = {
let mut db = Database::create("test.db", &options);
db.seed().expect("Error seeding database!")
};
for writers in 0..4 {
let done = Arc::new(AtomicBool::new(false));
let rwlock = Arc::new(RwLock::new(()));
let options = Arc::new(options);
{
let done = done.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(ITER_SECS));
done.store(true, Ordering::Release);
});
}
let db = Database::open("test.db", &options);
let (write_counts_send, write_counts_recv) = mpsc::channel();
for _ in 0..writers {
let done = done.clone();
let sender = write_counts_send.clone();
let rwlock = rwlock.clone();
let options = options.clone();
thread::spawn(move || {
let write_db = Database::open("test.db", &options);
let write_times = write_loop(write_db, done, rwlock);
sender
.send(write_times)
.expect("Could not send write count!");
});
}
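// Drop our copy of the sender; only writer threads hold one now, so the channel closes when they finish.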
drop(write_counts_send);
let (total_reads, mut read_times) = read_loop(db, &keys, done.clone(), rwlock.clone());
read_times.sort();
let mut total_writes = 0;
let mut write_times = Vec::new();
for _ in 0..writers {
let mut writes = write_counts_recv
.recv()
.expect("Failed to receive write counts!");
total_writes += writes.len();
write_times.append(&mut writes);
}
write_times.sort();
println!("{} writers:", writers);
println!("- Read {} values from the database.", read_times.len());
println!("- Wrote {} values to the database.", total_writes);
println!(
"- Mean read time: {:.5} ms",
average(&read_times) / 1000_000f64
);
let p95_nanos = read_times[(0.95 * (read_times.len() as f64)) as usize];
let p95_millis = p95_nanos as f64 / 1000_000f64;
println!("- P95: {} ms", p95_millis);
let p99_nanos = read_times[(0.99 * (read_times.len() as f64)) as usize];
let p99_millis = p99_nanos as f64 / 1000_000f64;
println!("- P99: {} ms", p99_millis);
let p99_9_nanos = read_times[(0.999 * (read_times.len() as f64)) as usize];
let p99_9_millis = p99_9_nanos as f64 / 1000_000f64;
println!("- P99.9: {} ms", p99_9_millis);
println!("");
fn not_str(v: bool) -> &'static str {
if v { "" } else { "!" }
}
perf_vec.push(PerfRecord {
config: format!("{}wal, {}shared_cache", not_str(options.wal), not_str(options.shared_cache)),
readers: 1,
writers,
reads_per_sec: total_reads as f64 / ITER_SECS as f64,
writes_per_sec: total_writes as f64 / ITER_SECS as f64,
read_p95: p95_millis,
read_p99: p99_millis,
read_p999: p99_9_millis,
write_p95: if write_times.len() > 0 { Some(write_times[(0.95 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p99: if write_times.len() > 0 { Some(write_times[(0.99 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
write_p999: if write_times.len() > 0 { Some(write_times[(0.999 * (write_times.len() as f64)) as usize] as f64 / 1000_000f64) } else { None },
});
}
}
fn print_or<T: std::fmt::Display>(v: Option<T>, o: &str) -> String {
v.map(|v| v.to_string())
.unwrap_or(o.to_owned())
}
let title_width = perf_vec.iter().map(|r| r.config.len()).max().unwrap();
println!("---------------------------------");
println!("");
println!("| configuration | readers | writers | reads/sec | writes/sec | read p95 (ms) | read p99 | read p99.9 | write p95 | write p99 | write p99.9 |");
println!("| ------------- | ------- | ------- | --------- | ---------- | ------------- | -------- | ---------- | --------- | --------- | ----------- |");
for row in perf_vec {
println!("| {:w0$} | {:2} | {:2} | {} | {} | {} | {} | {} | {} | {} | {} |",
row.config, row.readers, row.writers, row.reads_per_sec, row.writes_per_sec,
row.read_p95, row.read_p99, row.read_p999,
print_or(row.write_p95, "N/A"), print_or(row.write_p99, "N/A"), print_or(row.write_p999, "N/A"),
w0 = title_width,
);
}
} | rng.fill_bytes(&mut value);
while !stop.load(Ordering::Relaxed) { | random_line_split |
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T:'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T:'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self
.config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits)
.with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
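// Let glutin-winit create the window and pick a gl config; the closure filters candidates for the exact
// color/depth/stencil layout gfx expects, falling back to index 0 (and erroring below) when none match.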
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
let window_size = window.inner_size();
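// gfx main-target dimensions: (width, height, depth/array size 1, AA mode from the config's sample count).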
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> |
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` mean no multisamples.
Specific(u8),
}
impl Default for NumberOfSamples {
fn default() -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
}
| {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
} | identifier_body |
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T:'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T:'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
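// Request an sRGB-capable default framebuffer only when the color format's channel type is sRGB.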
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self
.config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits)
.with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
let window_size = window.inner_size();
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
}
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` mean no multisamples.
Specific(u8),
}
impl Default for NumberOfSamples {
fn | () -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
}
| default | identifier_name |
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T:'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T:'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self | .with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
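// Create the window surface first, then make the fresh context current on it before gfx loads GL symbols.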
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
let window_size = window.inner_size();
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
}
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` mean no multisamples.
Specific(u8),
}
impl Default for NumberOfSamples {
fn default() -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
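// Max prefers the candidate with the most samples; Specific(n) demands an exact sample count.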
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
} | .config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits) | random_line_split |
ply_loader.rs | use std::io::{Read, Seek, BufReader, BufRead, SeekFrom};
use std::error;
use std::fmt;
use crate::model::ply::{PlyFileHeader, PlyElementDescriptor, standard_formats, PlyPropertyDescriptor, PlyScalar, PlyDatatype};
use std::str::{SplitAsciiWhitespace, FromStr};
use byteorder::{LittleEndian, ByteOrder};
use num::{self, NumCast};
use std::marker::PhantomData;
pub struct PlyMeshLoader<'r, R: Read + Seek> {
reader: &'r mut R,
// file_header: Option<PlyFileHeader>,
// parse_state: Option<FileParseState>,
}
impl<'r, R: Read + Seek> PlyMeshLoader<'r, R> {
pub fn parse_header(self) -> Result<PlyDataPuller<'r, R>, Box<dyn error::Error>> {
fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
Err(Box::from(PlyError::new(message)))
}
// if let None = self.file_header {
// Make buf reader
let mut buf_reader = BufReader::new(self.reader);
// Read file header
let mut lines = (&mut buf_reader).lines();
let mut element_vec: Vec<PlyElementDescriptor> = Vec::new();
let mut current_element: Option<PlyElementDescriptor> = None;
let mut i = 0;
// let mut k = 0;
'header_loop: loop {
let line = if let Some(l) = lines.next() {
if let Ok(l) = l {
l
}
else {
return Err(Box::from(l.unwrap_err()));
}
}
else {
return ply_err("Header missing required fields or has no 'end_header' line");
};
// // DEBUG:
// println!("DEBUG: line: {}", line);
// if k > 40 {
// break;
// }
// k += 1;
// Ignore comment lines
if line.starts_with("comment") {
continue 'header_loop;
}
// End of header
if line.as_str().eq("end_header") {
break 'header_loop;
}
// Magic number
if i == 0 {
if!line.as_str().eq("ply") {
return ply_err("Header missing ply fingerprint");
}
i = 1;
}
// Format and version
else if i == 1 {
if !line.starts_with("format") {
return ply_err("Header missing ply format line")
}
if!line.as_str().eq("format ascii 1.0") {
return ply_err("Unknown or invalid ply format (only ascii 1.0 is currently supported)");
}
i = 2;
}
// Element descriptor
else if line.starts_with("element") {
// Put previous descriptor into list if we have one
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'element' token
let elem_name = String::from({
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
});
let num_entries = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
let a = a.unwrap();
let a = a.parse::<u32>();
if a.is_err() {
    return ply_err("Invalid element descriptor");
}
a.unwrap()
};
// Make new descriptor
let elem_index = element_vec.len() as u32;
current_element = Some(PlyElementDescriptor::new(elem_index, elem_name, num_entries));
}
// Property descriptor
else if line.starts_with("property") {
// Check that we are actually in an element
if current_element.is_none() {
return ply_err("Misplaced property line outside of element descriptor");
}
// Read property line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'property' token
let prop_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor");
}
let a = a.unwrap();
if a.eq("list") {
let list_index_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list index type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list index type"),
}
};
let list_data_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list data type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list data type"),
}
};
PlyDatatype::List {
index: list_index_type,
element: list_data_type,
}
}
else {
match PlyScalar::from_str(a) {
Some(s) => PlyDatatype::Scalar(s),
None => return ply_err("Unkown type in property descriptor")
}
}
};
let prop_name = {
let a = split_line.next();
let a = if let Some(a) = a {
String::from(a)
}
else {
return ply_err("Invalid property descriptor: Invalid name");
};
a
};
// Create property descriptor
let property_descriptor = PlyPropertyDescriptor {
name: prop_name,
datatype: prop_type,
};
// Add to current element
current_element.as_mut().unwrap().properties.push(property_descriptor);
}
}
// Put last descriptor into list
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Create file header
let file_header = PlyFileHeader {
format: standard_formats::ASCII_10,
elements: element_vec,
};
// Get back our file at the proper position
let real_seek_pos = buf_reader.seek(SeekFrom::Current(0)).map_err(|_| PlyError::new("Failed to seek file pos after header (this is probably a bug)"))?;
let reader = buf_reader.into_inner();
reader.seek(SeekFrom::Start(real_seek_pos))?;
// Make puller
let puller = PlyDataPuller {
buf_reader: BufReader::new(reader),
file_header,
parse_state: None,
_phantom: PhantomData,
};
return Ok(puller);
// }
// else {
// return ply_err("Cannot parse header more than once");
// }
}
pub fn new(source: &'r mut R) -> PlyMeshLoader<'r, R> {
PlyMeshLoader {
reader: source,
}
}
}
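// Minimal end-to-end sketch of the pull API (hypothetical `mesh.ply` path,
// error handling elided); see also the commented-out `test` function at the
// bottom of this file:
//
// let mut file = std::fs::File::open("mesh.ply")?;
// let mut puller = PlyMeshLoader::new(&mut file).parse_header()?;
// loop {
//     match puller.next_event() {
//         PullEvent::Element(mut parser) => { /* call parser.read_entry(..) per entry */ }
//         PullEvent::End => break,
//     }
// }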
pub struct PlyDataPuller<'r, R: Read + Seek> {
buf_reader: BufReader<&'r mut R>,
file_header: PlyFileHeader,
parse_state: Option<FileParseState>,
_phantom: PhantomData<()>
}
impl<'r, R: Read + Seek> PlyDataPuller<'r, R> {
pub fn next_event<'a>(&'a mut self) -> PullEvent<'a, 'r, R> {
return if self.parse_state.is_none() {
if self.file_header.elements.is_empty() {
return PullEvent::End
}
// Create initial parse state
self.parse_state = Some(FileParseState {
current_element_index: 0,
// entries_left: self.file_header.elements.first().unwrap().num_entries,
});
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.first().unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
// If we still have elements left update index
let state = self.parse_state.as_mut().unwrap();
if state.current_element_index < self.file_header.elements.len().saturating_sub(1) as u32 {
state.current_element_index += 1;
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.get(state.current_element_index as usize).unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
PullEvent::End
}
}
}
pub fn header(&self) -> &PlyFileHeader {
&self.file_header
}
}
struct FileParseState {
current_element_index: u32
}
pub enum PullEvent<'a, 'r: 'a, R: Read + Seek> {
Element(PlyElementParser<'a, 'r, R>),
End,
}
impl<'a, 'r: 'a, R: Read + Seek> PullEvent<'a, 'r, R> {
}
pub struct PlyElementParser<'a, 'r, R: Read + Seek> {
buf_reader: &'a mut BufReader<&'r mut R>,
// parse_state: &'a mut FileParseState,
element_descriptor: &'a PlyElementDescriptor,
// full_element_size: u32,
entries_left: u32,
}
impl<'a, 'r: 'a, R: Read + Seek> PlyElementParser<'a, 'r, R> {
pub fn read_entry(&mut self, buffer: &mut [u8]) -> Result<(), PlyReadError> {
// fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
// Err(Box::from(PlyError::new(message)))
// }
// Return appropriate error if no more lines are left
if self.entries_left == 0 {
return Err(PlyReadError::NoMoreEntries);
}
// Get initial stream pos so we can rewind later when the given buffer is
// too small.
// NOTE: seeking discards the internal buffer of the BufReader, which is
// wasteful, but without a custom reader implementation there is no other way.
let initial_stream_pos = match self.buf_reader.seek(SeekFrom::Current(0)) {
Ok(pos) => pos,
Err(err) => return Err(PlyReadError::Other(Box::new(err))),
};
let mut lines = self.buf_reader.lines();
let mut buffer_pos = 0usize;
let mut only_measuring_size = false;
// Get line
let line = lines.next();
let line = if let Some(l) = line {
if let Ok(l) = l {
l
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpected line"))));
}
} else {
// return ply_err("Unexpectedly no more lines left")
return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpected line"))));
};
// Split line at whitespace
let mut split_line = line.split_ascii_whitespace();
// Read entry line
for p in &self.element_descriptor.properties {
fn write_value<T: NumCast>(scalar_type: PlyScalar, value: T, data_size: usize, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) {
// Buffer is too small, eventually return a TooSmall error but
// for now only set the flag so we can continue calculating the
// actually needed buffer size
let final_pos = *buffer_pos + data_size;
if buffer.len() < final_pos {
*only_measure = true;
}
if *only_measure {
*buffer_pos += data_size; // Increment anyway so we know what the final needed buffer size is
}
else {
// Get offset buffer slice
let slice = &mut buffer[*buffer_pos..final_pos];
match scalar_type {
S::uchar => slice[0] = num::cast::<_, u8>(value).unwrap(),
S::uint => LittleEndian::write_u32(slice, num::cast::<_, u32>(value).unwrap()),
S::float => LittleEndian::write_f32(slice, num::cast::<_, f32>(value).unwrap()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
// Increment buffer pos
*buffer_pos += data_size;
}
}
fn process_value<T: Copy + FromStr + NumCast>(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<T, PlyReadError> {
let value_str = if let Some(s) = split_line.next() {
s
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Missing property value"))));
};
let val: T = match value_str.parse::<T>() {
Ok(val) => val,
Err(_err) => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Failed to parse value")))),
};
// Write the value into the buffer
write_value::<T>(scalar_type, val, std::mem::size_of::<T>(), buffer, buffer_pos, only_measure);
Ok(val)
}
fn process_scalar_uncast(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<(), PlyReadError> {
match scalar_type {
S::uchar => process_value::<u8>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::uint => process_value::<u32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::float => process_value::<f32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
}
use PlyScalar as S;
match p.datatype {
PlyDatatype::Scalar(scalar) => {
process_scalar_uncast(scalar, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
PlyDatatype::List {index, element} => {
let num_elements = match index {
S::uchar => process_value::<u8>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::ushort => process_value::<u16>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::uint => process_value::<u32>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
_ => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid list index datatype: Only uchar, ushort and uint are valid")))),
};
for _ in 0..num_elements {
process_scalar_uncast(element, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
}
}
}
if only_measuring_size {
// Rewind reader
if let Err(e) = self.buf_reader.seek(SeekFrom::Start(initial_stream_pos)) {
return Err(PlyReadError::Other(Box::new(e)));
}
// Return the min buffer size based on the final offset (since we still go over all elements even if the buffer is too small)
Err(PlyReadError::BufferTooSmall {min_buffer_size: buffer_pos})
}
else {
self.entries_left -= 1;
Ok(())
}
}
pub fn element_descriptor(&self) -> &'a PlyElementDescriptor {
self.element_descriptor
}
fn new(reader: &'a mut BufReader<&'r mut R>, element_descriptor: &'a PlyElementDescriptor, _parse_state: &'a mut FileParseState) -> PlyElementParser<'a, 'r, R> {
// // Calc full element size
// let mut full_element_size = 0u32;
// for p in &element_descriptor.properties {
// full_element_size += p.datatype.byte_size();
// }
let entries_left = element_descriptor.num_entries;
PlyElementParser {
buf_reader: reader,
element_descriptor,
// full_element_size,
// parse_state,
entries_left,
}
}
}
//mod generic_byteorder {
// use byteorder::{WriteBytesExt, LittleEndian, ByteOrder};
//
// pub trait GenericByteOrder<E: ByteOrder> {
// fn write_into_slice(self, buffer: &mut [u8]);
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for f32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_f32(buffer, self)
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u8 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// buffer[0] = self
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_u32(buffer, self)
// }
// }
//}
pub enum PlyReadError {
NoMoreEntries,
BufferTooSmall {
min_buffer_size: usize,
},
Other(Box<dyn error::Error>),
}
impl error::Error for PlyReadError {}
impl fmt::Display for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
use PlyReadError as E;
match self {
E::NoMoreEntries => write!(f, "PlyReadError: No more entries"),
E::BufferTooSmall {min_buffer_size} => write!(f, "PlyReadError: Buffer too small: min size = {}", min_buffer_size),
E::Other(error) => <Box<dyn error::Error> as fmt::Display>::fmt(error, f)
}
}
}
impl fmt::Debug for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub struct PlyError {
message: &'static str,
}
impl PlyError {
pub fn new(message: &'static str) -> PlyError {
PlyError {
message
}
}
}
impl error::Error for PlyError {}
impl fmt::Display for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "PlyError: {}", self.message)
}
}
impl fmt::Debug for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub fn dump_ply_header(header: &PlyFileHeader) {
for element in &header.elements {
println!("element '{}' {}", element.name, element.num_entries);
for property in &element.properties {
println!(" property '{}' {:?}", property.name, property.datatype)
}
}
}
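// Illustrative output for a small mesh (values are made up, format follows
// the println! calls above):
//
//   element 'vertex' 1024
//    property 'x' Scalar(float)
//    property 'y' Scalar(float)
//    property 'z' Scalar(float)
//   element 'face' 2048
//    property 'vertex_indices' List { index: uchar, element: uint }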
/*
pub fn test() -> Result<(), Box<dyn error::Error>> {
let mut file = OpenOptions::new().read(true).open(r"C:\Users\Jan\Desktop\Lee Head\Lee Head.ply")?;
let loader = PlyMeshLoader::new(&mut file);
let mut puller = loader.parse_header()?;
dump_ply_header(&puller.file_header);
// let mut puller = RefCell::new(puller);
loop {
// let mut borrowed_puller = puller.borrow_mut();
match puller.next_event() {
PullEvent::Element(mut parser) => {
let mut buffer = [0u8; 32];
let res = parser.read_entry(&mut buffer);
if let Err(PlyReadError::BufferTooSmall {min_buffer_size}) = res {
println!("Buffer too small! (min {})", min_buffer_size);
return Ok(());
}
else if let Ok(_) = res {
let mut pos = 0;
for p in parser.element_descriptor.properties() {
match p.datatype {
PlyDatatype::Scalar(scalar) => {
let final_pos = pos + scalar.byte_size();
match scalar {
PlyScalar::float => {
let val = LittleEndian::read_f32(&buffer[(pos as usize)..(final_pos as usize)]);
println!("f32({})", val);
},
_ => unimplemented!()
}
pos = final_pos;
},
PlyDatatype::List {index, element} => {
}
}
}
}
}
PullEvent::End => break,
}
break;
}
Ok(())
}
*/
// main.rs
mod xcb_util;
use log::debug;
use crate::xcb_util::{
geometry::*,
window::WindowExt,
};
use std::str;
use anyhow::{
anyhow,
Error,
};
use structopt::StructOpt;
use xcb::{
base as xbase,
randr as xrandr,
xproto,
};
#[derive(StructOpt)]
struct GlobalOptions {}
#[derive(StructOpt)]
struct Fract {
num: f32,
denom: f32,
}
impl Fract {
fn value(&self) -> f32 { self.num / self.denom }
}
impl std::str::FromStr for Fract {
type Err = Error;
fn from_str(s: &str) -> Result<Fract, Error> {
let parts = s.split('/').collect::<Vec<_>>();
if parts.len() != 2 {
    return Err(anyhow!("expected a fraction of the form 'num/denom'"));
}
Ok(Fract {
num: f32::from_str(parts[0])?,
denom: f32::from_str(parts[1])?,
})
}
}
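// Example: "9/16".parse::<Fract>() yields num = 9.0 and denom = 16.0, so
// value() == 0.5625; with the length guard above, malformed input such as
// "9" returns Err instead of panicking on the missing denominator.
//
// let f: Fract = "9/16".parse().unwrap();
// assert_eq!(f.value(), 0.5625);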
struct Geometry<'a> {
pub setup: xproto::Setup<'a>,
pub root_win: xproto::Window,
pub root_win_frame: ScreenRect,
pub srs: xrandr::GetScreenResourcesCurrentReply,
pub display_frames: Vec<ScreenRect>,
pub work_areas: Vec<ScreenRect>,
pub active_window: xproto::Window,
pub active_window_frame: ScreenRect,
pub active_window_insets: ScreenInsets,
}
fn get_geometry(conn: &xbase::Connection) -> Result<Geometry, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let root_window_rect = root_window.get_geometry(&conn)?.as_rect();
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let display_frames = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
.collect();
debug!("display_frames: {:?}", display_frames);
let gvec: Vec<i32> =
root_window.get_property(&conn, "_NET_WORKAREA", xproto::ATOM_CARDINAL, 8)?;
debug!("gvec: {:?}", gvec);
let work_area = gvec
.as_slice()
.chunks(4)
.map(|slc| {
ScreenRect::new(
ScreenPoint::new(slc[0] as i32, slc[1] as i32),
ScreenSize::new(slc[2] as i32, slc[3] as i32),
)
})
.collect::<Vec<ScreenRect>>();
debug!("Work area: {:?}", work_area);
use xcb_util::geometry::*;
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = active_window.get_geometry(&conn)?.as_rect();
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let insets = active_window.get_property(&conn, "_NET_FRAME_EXTENTS", xproto::ATOM_CARDINAL, 4)?;
let insets = if let [left, right, top, bottom] = insets.as_slice() {
ScreenInsets::new(*top, *right, *bottom, *left)
} else {
ScreenInsets::zero()
};
Ok(Geometry {
setup,
root_win: root_window,
root_win_frame: root_window_rect,
srs,
display_frames,
work_areas: work_area,
active_window,
active_window_frame,
active_window_insets: insets,
})
}
#[derive(StructOpt)]
struct MoveWindowOnOutput {
x: Fract,
y: Fract,
w: Fract,
h: Fract,
}
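// The four fractions are interpreted in display-percentage space: e.g.
// `move-window-on-output 0/1 0/1 1/2 1/1` places the window at the top-left
// corner of the output at half its width and full height (subcommand
// spelling assumes StructOpt's default kebab-casing).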
fn inset_frame_by_struts(conn: &xbase::Connection, mut frame: ScreenRect, root_window: xproto::Window) -> Result<ScreenRect, Error> {
let mut queue = vec![root_window];
while let Some(w) = queue.pop() {
let strut: Vec<i32> =
w.get_property(conn, "_NET_WM_STRUT_PARTIAL", xproto::ATOM_CARDINAL, 12)?;
if !strut.is_empty() {
#[derive(Debug)]
struct Strut {
left: i32,
right: i32,
top: i32,
bottom: i32,
left_start_y: i32,
left_end_y: i32,
right_start_y: i32,
right_end_y: i32,
top_start_x: i32,
top_end_x: i32,
bottom_start_x: i32,
bottom_end_x: i32,
}
let strut = Strut {
left: strut[0],
right: strut[1],
top: strut[2],
bottom: strut[3],
left_start_y: strut[4],
left_end_y: strut[5],
right_start_y: strut[6],
right_end_y: strut[7],
top_start_x: strut[8],
top_end_x: strut[9],
bottom_start_x: strut[10],
bottom_end_x: strut[11],
};
// TODO:
// - Check if the strut-lines (NOT the whole rect) are contained within the
// target display frame
// - IF so, adjust the display frame
if strut.top > frame.origin.y
&& strut.top < frame.origin.y + frame.size.height
&& strut.top_start_x >= frame.origin.x
&& strut.top_end_x <= frame.origin.x + frame.size.width
{
let overlap = strut.top - frame.origin.y;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.y += overlap;
frame.size.height -= overlap;
}
if strut.left > frame.origin.x
&& strut.left < frame.origin.x + frame.size.width
&& strut.left_start_y >= frame.origin.y
&& strut.left_end_y <= frame.origin.y + frame.size.height
{
let overlap = strut.left - frame.origin.x;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.x += overlap;
frame.size.width -= overlap;
}
if strut.bottom < frame.origin.y + frame.size.height
&& strut.bottom > frame.origin.y
&& strut.bottom_start_x >= frame.origin.x
&& strut.bottom_end_x <= frame.origin.x + frame.size.width
{
let overlap = frame.origin.y + frame.size.height - strut.bottom;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.height -= overlap;
}
if strut.right < frame.origin.x + frame.size.width
&& strut.right > frame.origin.x
&& strut.right_start_y >= frame.origin.y
&& strut.right_end_y <= frame.origin.y + frame.size.height
{
let overlap = frame.origin.x + frame.size.width - strut.right;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.width -= overlap;
}
}
let mut children = xproto::query_tree(conn, w).get_reply()?.children().to_vec();
queue.append(&mut children);
}
Ok(frame)
}
// TODO (alaroldai):
// Compute "output dimensions" by:
// - Getting the rects of connected outputs
// - Finding all windows that set the _NET_STRUT_PARTIAL
// - FOR EACH, Inset the rect of the containing output if necessary
// - Return the inset outputs.
fn get_output_available_rect(conn: &xbase::Connection) -> Result<ScreenRect, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = dbg!(active_window.get_geometry(&conn)?.as_rect());
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let mut display_frame = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
// Track each candidate frame together with its intersection area so the
// output with the largest overlap wins; comparing the new intersection
// against the full area of the previously chosen frame would effectively
// freeze the first match.
.fold(None, |init: Option<(ScreenRect, i32)>, frame| {
let new = frame.intersection(&active_window_frame);
debug!(
"{}: {} intersection with {}",
frame,
if new.is_some() { "Some" } else { "No" },
active_window_frame
);
match (new, init) {
(Some(new), Some((_, old_area))) if new.area() > old_area => Some((frame, new.area())),
(Some(new), None) => Some((frame, new.area())),
_ => init,
}
})
.map(|(frame, _)| frame)
.unwrap();
display_frame = inset_frame_by_struts(conn, display_frame, root_window)?;
Ok(display_frame)
}
impl MoveWindowOnOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let display_frame = get_output_available_rect(&conn)?;
let geom = get_geometry(&conn)?;
let pct = DisplayPercentageSpaceRect::new(
DisplayPercentageSpacePoint::new(self.x.value(), self.y.value()),
DisplayPercentageSpaceSize::new(self.w.value(), self.h.value()),
);
let new_rect = pct
.to_rect(display_frame)
.inner_rect(geom.active_window_insets);
dbg!(&new_rect);
// NOTE: Some window managers (Kwin and XFWM, for example) may refuse to
// position windows as requested if they are in a "tiled" or "maximised"
// state. In the case of Kwin, this can be fixed by using a window rule to
// force the "ignore requested geometry" flag to `false`.
geom
.root_win
.move_resize(&conn, geom.active_window, new_rect)?;
Ok(())
}
}
#[derive(StructOpt)]
enum Direction {
North,
South,
East,
West,
}
impl std::str::FromStr for Direction {
type Err = Error;
fn from_str(s: &str) -> Result<Direction, Error> {
match s {
"h" => Ok(Direction::West),
"j" => Ok(Direction::South),
"k" => Ok(Direction::North),
"l" => Ok(Direction::East),
_ => Err(anyhow!("Not a known direction - use hjkl")),
}
}
}
#[derive(StructOpt)]
struct MoveWindowToOutput {
direction: Direction,
}
impl MoveWindowToOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let mut geom = get_geometry(&conn)?;
let (x, y) = match self.direction {
Direction::West => (-1.0, 0.0),
Direction::South => (0.0, 1.0),
Direction::North => (0.0, -1.0),
Direction::East => (1.0, 0.0),
};
let direction: euclid::Vector2D<f32, ScreenSpace> = euclid::Vector2D::new(x as f32, y as f32);
let current_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<(ScreenRect, i32)>, frame| {
let new = frame.intersection(&geom.active_window_frame);
println!("Found intersection: {:#?}", new);
match (new, init) {
(Some(new), Some((_, old_area))) if new.area() > old_area => Some((*frame, new.area())),
(Some(new), None) => Some((*frame, new.area())),
_ => init,
}
})
.map(|(frame, _)| frame)
.and_then(|frame| inset_frame_by_struts(&conn, frame, geom.root_win).ok())
.unwrap();
let new_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<ScreenRect>, frame| {
let vec: euclid::Vector2D<f32, ScreenSpace> =
(frame.center() - current_output_frame.center()).cast::<f32>();
let old: Option<euclid::Vector2D<f32, ScreenSpace>> =
init.map(|init| (init.center() - current_output_frame.center()).cast::<f32>());
let projection = vec.dot(direction);
match old {
None if projection > 0.0 => {
println!(
"Starting with output {:#?} / projection {:#?}",
frame, projection
);
Some(*frame)
}
Some(old) if projection < old.dot(direction) && projection > 0.0 => {
println!(
"Replacing projection {} ({}) with {} ({})",
init.unwrap(),
old.dot(direction),
frame,
projection
);
Some(*frame)
}
_ => {
println!(
"Ignoring output {:#?} with projection {:#?}",
frame, projection
);
init
}
}
})
.unwrap();
let new_output_frame = inset_frame_by_struts(&conn, new_output_frame, geom.root_win)?;
dbg!(&geom.active_window_frame);
// geom.active_window_frame = geom.active_window_frame.inner_rect(geom.active_window_insets);
dbg!(&geom.active_window_insets);
dbg!(&current_output_frame);
dbg!(&new_output_frame);
let decorated_source_frame = geom.active_window_frame.outer_rect(geom.active_window_insets);
let pct_rect = decorated_source_frame.as_dps(current_output_frame);
dbg!(&pct_rect);
let decorated_dest_frame = pct_rect.to_rect(new_output_frame);
let bare_dest_frame = decorated_dest_frame.inner_rect(geom.active_window_insets);
dbg!(&bare_dest_frame);
geom
.root_win
.move_resize(&conn, geom.active_window, bare_dest_frame)
}
}
fn main() -> Result<(), Error> {
env_logger::init();
#[derive(StructOpt)]
enum Action {
MoveWindowOnOutput(MoveWindowOnOutput),
MoveWindowToOutput(MoveWindowToOutput),
}
#[derive(StructOpt)]
struct App {
#[structopt(flatten)]
options: GlobalOptions,
#[structopt(subcommand)]
action: Action,
}
impl App {
fn run(self) -> Result<(), Error> {
match self.action {
Action::MoveWindowOnOutput(opts) => opts.run(self.options),
Action::MoveWindowToOutput(opts) => opts.run(self.options),
}
}
}
App::from_args().run()
} | let display_frame = get_output_available_rect(&conn)?;
let geom = get_geometry(&conn)?;
| random_line_split |
main.rs | mod xcb_util;
use log::debug;
use crate::xcb_util::{
geometry::*,
window::WindowExt,
};
use std::str;
use anyhow::{
anyhow,
Error,
};
use structopt::StructOpt;
use xcb::{
base as xbase,
randr as xrandr,
xproto,
};
#[derive(StructOpt)]
struct GlobalOptions {}
#[derive(StructOpt)]
struct | {
num: f32,
denom: f32,
}
impl Fract {
fn value(&self) -> f32 { self.num / self.denom }
}
impl std::str::FromStr for Fract {
type Err = Error;
fn from_str(s: &str) -> Result<Fract, Error> {
let parts = s.split('/').collect::<Vec<_>>();
// Guard against malformed input so the indexing below cannot panic.
if parts.len() != 2 {
return Err(anyhow!("Expected a fraction of the form `numerator/denominator`"));
}
Ok(Fract {
num: f32::from_str(parts[0])?,
denom: f32::from_str(parts[1])?,
})
}
}
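// Illustrative usage (hypothetical values, not from the original docs): the
// argument "1/3" parses to Fract { num: 1.0, denom: 3.0 }, so value() returns
// roughly 0.333; input without exactly one '/' now yields an Err instead of
// panicking.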
struct Geometry<'a> {
pub setup: xproto::Setup<'a>,
pub root_win: xproto::Window,
pub root_win_frame: ScreenRect,
pub srs: xrandr::GetScreenResourcesCurrentReply,
pub display_frames: Vec<ScreenRect>,
pub work_areas: Vec<ScreenRect>,
pub active_window: xproto::Window,
pub active_window_frame: ScreenRect,
pub active_window_insets: ScreenInsets,
}
fn get_geometry(conn: &xbase::Connection) -> Result<Geometry, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let root_window_rect = root_window.get_geometry(&conn)?.as_rect();
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let display_frames = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
.collect();
debug!("display_frames: {:?}", display_frames);
let gvec: Vec<i32> =
root_window.get_property(&conn, "_NET_WORKAREA", xproto::ATOM_CARDINAL, 8)?;
debug!("gvec: {:?}", gvec);
let work_area = gvec
.as_slice()
.chunks(4)
.map(|slc| {
ScreenRect::new(
ScreenPoint::new(slc[0] as i32, slc[1] as i32),
ScreenSize::new(slc[2] as i32, slc[3] as i32),
)
})
.collect::<Vec<ScreenRect>>();
debug!("Work area: {:?}", work_area);
use xcb_util::geometry::*;
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = active_window.get_geometry(&conn)?.as_rect();
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let insets = active_window.get_property(&conn, "_NET_FRAME_EXTENTS", xproto::ATOM_CARDINAL, 4)?;
let insets = if let [left, right, top, bottom] = insets.as_slice() {
ScreenInsets::new(*top, *right, *bottom, *left)
} else {
ScreenInsets::zero()
};
Ok(Geometry {
setup,
root_win: root_window,
root_win_frame: root_window_rect,
srs,
display_frames,
work_areas: work_area,
active_window,
active_window_frame,
active_window_insets: insets,
})
}
#[derive(StructOpt)]
struct MoveWindowOnOutput {
x: Fract,
y: Fract,
w: Fract,
h: Fract,
}
fn inset_frame_by_struts(conn: &xbase::Connection, mut frame: ScreenRect, root_window: xproto::Window) -> Result<ScreenRect, Error> {
let mut queue = vec![root_window];
while let Some(w) = queue.pop() {
let strut: Vec<i32> =
w.get_property(conn, "_NET_WM_STRUT_PARTIAL", xproto::ATOM_CARDINAL, 12)?;
if !strut.is_empty() {
#[derive(Debug)]
struct Strut {
left: i32,
right: i32,
top: i32,
bottom: i32,
left_start_y: i32,
left_end_y: i32,
right_start_y: i32,
right_end_y: i32,
top_start_x: i32,
top_end_x: i32,
bottom_start_x: i32,
bottom_end_x: i32,
}
let strut = Strut {
left: strut[0],
right: strut[1],
top: strut[2],
bottom: strut[3],
left_start_y: strut[4],
left_end_y: strut[5],
right_start_y: strut[6],
right_end_y: strut[7],
top_start_x: strut[8],
top_end_x: strut[9],
bottom_start_x: strut[10],
bottom_end_x: strut[11],
};
// TODO:
// - Check if the strut-lines (NOT the whole rect) are contained within the
// target display frame
// - IF so, adjust the display frame
if strut.top > frame.origin.y
&& strut.top < frame.origin.y + frame.size.height
&& strut.top_start_x >= frame.origin.x
&& strut.top_end_x <= frame.origin.x + frame.size.width
{
let overlap = strut.top - frame.origin.y;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.y += overlap;
frame.size.height -= overlap;
}
if strut.left > frame.origin.x
&& strut.left < frame.origin.x + frame.size.width
&& strut.left_start_y >= frame.origin.y
&& strut.left_end_y <= frame.origin.y + frame.size.height
{
let overlap = strut.left - frame.origin.x;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.x += overlap;
frame.size.width -= overlap;
}
if strut.bottom < frame.origin.y + frame.size.height
&& strut.bottom > frame.origin.y
&& strut.bottom_start_x >= frame.origin.x
&& strut.bottom_end_x <= frame.origin.x + frame.size.width
{
let overlap = frame.origin.y + frame.size.height - strut.bottom;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.height -= overlap;
}
if strut.right < frame.origin.x + frame.size.width
&& strut.right > frame.origin.x
&& strut.right_start_y >= frame.origin.y
&& strut.right_end_y <= frame.origin.y + frame.size.height
{
let overlap = frame.origin.x + frame.size.width - strut.right;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.width -= overlap;
}
}
let mut children = xproto::query_tree(conn, w).get_reply()?.children().to_vec();
queue.append(&mut children);
}
Ok(frame)
}
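// Worked example (hypothetical numbers): a panel reserving 30 px at the top of
// a 1920x1080 output reports a strut with top = 30 and top_start_x..top_end_x
// spanning that output. For a frame at (0, 0) sized 1920x1080 the overlap is
// 30, so the frame becomes (0, 30) sized 1920x1050.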
// TODO (alaroldai):
// Compute "output dimensions" by:
// - Getting the rects of connected outputs
// - Finding all windows that set the _NET_WM_STRUT_PARTIAL property
// - FOR EACH, Inset the rect of the containing output if necessary
// - Return the inset outputs.
fn get_output_available_rect(conn: &xbase::Connection) -> Result<ScreenRect, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = dbg!(active_window.get_geometry(&conn)?.as_rect());
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let mut display_frame = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
.fold(None, |init: Option<ScreenRect>, frame| {
let new = frame.intersection(&active_window_frame);
debug!(
"{}: {} intersection with {}",
frame,
if new.is_some() { "Some" } else { "No" },
active_window_frame
);
match (new, init) {
(Some(new), Some(old)) if new.area() > old.area() => Some(frame),
(Some(_), None) => Some(frame),
_ => init,
}
})
.unwrap();
display_frame = inset_frame_by_struts(conn, display_frame, root_window)?;
Ok(display_frame)
}
impl MoveWindowOnOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let display_frame = get_output_available_rect(&conn)?;
let geom = get_geometry(&conn)?;
let pct = DisplayPercentageSpaceRect::new(
DisplayPercentageSpacePoint::new(self.x.value(), self.y.value()),
DisplayPercentageSpaceSize::new(self.w.value(), self.h.value()),
);
let new_rect = pct
.to_rect(display_frame)
.inner_rect(geom.active_window_insets);
dbg!(&new_rect);
// NOTE: Some window managers (Kwin and XFWM, for example) may refuse to
// position windows as requested if they are in a "tiled" or "maximised"
// state. In the case of Kwin, this can be fixed by using a window rule to
// force the "ignore requested geometry" flag to `false`.
geom
.root_win
.move_resize(&conn, geom.active_window, new_rect)?;
Ok(())
}
}
#[derive(StructOpt)]
enum Direction {
North,
South,
East,
West,
}
impl std::str::FromStr for Direction {
type Err = Error;
fn from_str(s: &str) -> Result<Direction, Error> {
match s {
"h" => Ok(Direction::West),
"j" => Ok(Direction::South),
"k" => Ok(Direction::North),
"l" => Ok(Direction::East),
_ => Err(anyhow!("Not a known direction - use hjkl")),
}
}
}
#[derive(StructOpt)]
struct MoveWindowToOutput {
direction: Direction,
}
impl MoveWindowToOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let mut geom = get_geometry(&conn)?;
let (x, y) = match self.direction {
Direction::West => (-1.0, 0.0),
Direction::South => (0.0, 1.0),
Direction::North => (0.0, -1.0),
Direction::East => (1.0, 0.0),
};
let direction: euclid::Vector2D<f32, ScreenSpace> = euclid::Vector2D::new(x as f32, y as f32);
let current_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<ScreenRect>, frame| {
let new = frame.intersection(&geom.active_window_frame);
println!("Found intersection: {:#?}", new);
match (new, init) {
(Some(new), Some(old)) if new.area() > old.area() => Some(*frame),
(Some(_), None) => Some(*frame),
_ => init,
}
})
.and_then(|frame| inset_frame_by_struts(&conn, frame, geom.root_win).ok())
.unwrap();
let new_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<ScreenRect>, frame| {
let vec: euclid::Vector2D<f32, ScreenSpace> =
(frame.center() - current_output_frame.center()).cast::<f32>();
let old: Option<euclid::Vector2D<f32, ScreenSpace>> =
init.map(|init| (init.center() - current_output_frame.center()).cast::<f32>());
let projection = vec.dot(direction);
match old {
None if projection > 0.0 => {
println!(
"Starting with output {:#?} / projection {:#?}",
frame, projection
);
Some(*frame)
}
Some(old) if projection < old.dot(direction) && projection > 0.0 => {
println!(
"Replacing projection {} ({}) with {} ({})",
init.unwrap(),
old.dot(direction),
frame,
projection
);
Some(*frame)
}
_ => {
println!(
"Ignoring output {:#?} with projection {:#?}",
frame, projection
);
init
}
}
})
.unwrap();
let new_output_frame = inset_frame_by_struts(&conn, new_output_frame, geom.root_win)?;
dbg!(&geom.active_window_frame);
// geom.active_window_frame = geom.active_window_frame.inner_rect(geom.active_window_insets);
dbg!(&geom.active_window_insets);
dbg!(&current_output_frame);
dbg!(&new_output_frame);
let decorated_source_frame = geom.active_window_frame.outer_rect(geom.active_window_insets);
let pct_rect = decorated_source_frame.as_dps(current_output_frame);
dbg!(&pct_rect);
let decorated_dest_frame = pct_rect.to_rect(new_output_frame);
let bare_dest_frame = decorated_dest_frame.inner_rect(geom.active_window_insets);
dbg!(&bare_dest_frame);
geom
.root_win
.move_resize(&conn, geom.active_window, bare_dest_frame)
}
}
fn main() -> Result<(), Error> {
env_logger::init();
#[derive(StructOpt)]
enum Action {
MoveWindowOnOutput(MoveWindowOnOutput),
MoveWindowToOutput(MoveWindowToOutput),
}
#[derive(StructOpt)]
struct App {
#[structopt(flatten)]
options: GlobalOptions,
#[structopt(subcommand)]
action: Action,
}
impl App {
fn run(self) -> Result<(), Error> {
match self.action {
Action::MoveWindowOnOutput(opts) => opts.run(self.options),
Action::MoveWindowToOutput(opts) => opts.run(self.options),
}
}
}
App::from_args().run()
}
| Fract | identifier_name |
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
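// Sketch of the expected behaviour (derived from the code above):
// BitField::new_from_set(&[0, 2, 5]) allocates a six-bit vector and sets bits
// 0, 2 and 5, so those indices read back as set and all others as unset.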
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => |
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
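// Example (illustrative): for a bitfield built from &[3, 7], first() returns
// Ok(3); on a bitfield with no set bits it returns the "no set bits" error.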
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if any bit is set.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
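// Example of the slice semantics (a reading of the loops above, not an
// external contract): with set bits {0, 2, 4}, slice(1, 2) skips the first
// set bit and then takes the next two, yielding a bitfield whose set bits
// are {2, 4}.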
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges two bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges two bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
// Compare against the decoded length; `bit_vec` is the RLE+ encoded input,
// whose bit length is unrelated to the number of decoded bits.
if max >= decoded.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
| {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
} | conditional_block |
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn | () -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if any bit is set.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges two bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges two bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
// Compare against the decoded length; `bit_vec` is the RLE+ encoded input,
// whose bit length is unrelated to the number of decoded bits.
if max >= decoded.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
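// Ordering note (a reading of the code above, not an external contract): the
// decoded vector is resized for the largest cached `set` index, then `set`
// bits are applied, then `unset` bits. set()/unset() keep the two caches
// disjoint, so the order of the last two passes only matters if the caches
// are mutated directly.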
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
| default | identifier_name |
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index. | // TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if any bit is set.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges two bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges two bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
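// Example (illustrative): with a = [1, 0, 1] and b = [0, 1, 1, 1], the loop
// rewrites the overlapping prefix to [1, 1, 1] and extend() appends the
// remaining bit, giving [1, 1, 1, 1]; `a` never shrinks.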
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
// Compare against the decoded length; `bit_vec` is the RLE+ encoded input,
// whose bit length is unrelated to the number of decoded bits.
if max >= decoded.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
} | random_line_split |
|
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if any bit is set.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges two bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges two bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> |
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
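// Hypothetical usage (names invented for the example):
// let merged = BitField::union(vec![&bf_a, &bf_b, &bf_c])?;
// This is equivalent to folding merge_assign over the inputs starting from an
// empty bitfield.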
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
// Compare against the decoded length; `bit_vec` is the RLE+ encoded input,
// whose bit length is unrelated to the number of decoded bits.
if max >= decoded.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
| {
self.intersect_assign(other)?;
Ok(self)
} | identifier_body |
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically a NFA with ε-moves).
///
/// A NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 4 │ ----> │ 5 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
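// Illustrative call (assuming a Symbol constructor for 'a' and 'z' exists;
// the exact conversion is not shown in this file): connecting s0 to s1 via
// the range 'a'..='z' registers that range with the alphabet segmentation
// and adds one transition covering every symbol in it.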
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
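// Usage sketch (hedged; the pattern here is invented): lowering
// Pattern::Seq(vec![p1, p2]) threads the automaton through p1 then p2, and
// the returned identifier is the accepting state of the whole sequence,
// which a caller can name to attach a rule callback.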
/// Computes, for each state, the set of states reachable through epsilon links (its epsilon
/// closure), using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len()) | for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
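        // Breadth-first subset construction: `dfa_eps_ixs[i]` holds the set of NFA
        // states represented by DFA state `i`; newly discovered sets are appended
        // and processed in turn until no unexplored DFA state remains.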
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
                    if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
                if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
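        // Every callback emitted below shares a single priority, equal to the number
        // of DFA states.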
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
State::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
#[test]
fn test_to_dfa_letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
}
| ;
| identifier_name |
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically an NFA with ε-moves).
///
/// An NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 4 │ ----> │ 5 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
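    ///
    /// A minimal usage sketch; `pattern` stands in for any previously built
    /// [`Pattern`], so the snippet is illustrative rather than compiled:
    ///
    /// ```ignore
    /// let mut nfa   = NFA::default();
    /// let start     = nfa.new_state();
    /// // `accepting` is the end state reached after matching `pattern`.
    /// let accepting = nfa.new_pattern(start, &pattern);
    /// ```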
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
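                // Thompson-style Kleene star: `s1` enters the repeated body, `s2` is the
                // body's exit, and `s3` is the accepting state. The epsilon links below
                // permit zero repetitions (current -> s3) and looping back (s3 -> s1).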
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
/// Merges states that are connected by epsilon links, using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
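        // Computes the epsilon closure of every state: the set of states (including
        // itself) reachable through epsilon links alone, via the DFS helper below.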
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
                if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
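        // Breadth-first subset construction: `dfa_eps_ixs[i]` holds the set of NFA
        // states represented by DFA state `i`; newly discovered sets are appended
        // and processed in turn until no unexplored DFA state remains.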
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
                    if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
                if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
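        // Every callback emitted below shares a single priority, equal to the number
        // of DFA states.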
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
S | #[test]
fn test_to_dfa
_letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
}
| tate::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
| identifier_body |
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically an NFA with ε-moves).
///
/// An NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 4 │ ----> │ 5 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
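    ///
    /// A minimal usage sketch; `pattern` stands in for any previously built
    /// [`Pattern`], so the snippet is illustrative rather than compiled:
    ///
    /// ```ignore
    /// let mut nfa   = NFA::default();
    /// let start     = nfa.new_state();
    /// // `accepting` is the end state reached after matching `pattern`.
    /// let accepting = nfa.new_pattern(start, &pattern);
    /// ```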
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
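                // Thompson-style Kleene star: `s1` enters the repeated body, `s2` is the
                // body's exit, and `s3` is the accepting state. The epsilon links below
                // permit zero repetitions (current -> s3) and looping back (s3 -> s1).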
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
/// Merges states that are connected by epsilon links, using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
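        // Computes the epsilon closure of every state: the set of states (including
        // itself) reachable through epsilon links alone, via the DFS helper below.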
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
                if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
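        // Breadth-first subset construction: `dfa_eps_ixs[i]` holds the set of NFA
        // states represented by DFA state `i`; newly discovered sets are appended
        // and processed in turn until no unexplored DFA state remains.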
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
                    if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
                if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
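        // Every callback emitted below shares a single priority, equal to the number
        // of DFA states.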
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority}); |
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
State::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
#[test]
fn test_to_dfa_letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
} | }
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat; | random_line_split |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory, |
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
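        // Cap the executor at the number of warmers so no worker thread sits idle.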
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
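        // The ticker fires every GC_INTERVAL for the lifetime of the process; once
        // the inner state has been dropped, `upgrade()` yields `None` and each tick
        // becomes a no-op.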
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
} | }))))
} | random_line_split |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn | (
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
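        // Cap the executor at the number of warmers so no worker thread sits idle.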
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
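        // The ticker fires every GC_INTERVAL for the lifetime of the process; once
        // the inner state has been dropped, `upgrade()` yields `None` and each tick
        // becomes a no-op.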
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
}
| warm_new_searcher_generation | identifier_name |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
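        // Cap the executor at the number of warmers so no worker thread sits idle.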
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
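        // The ticker fires every GC_INTERVAL for the lifetime of the process; once
        // the inner state has been dropped, `upgrade()` yields `None` and each tick
        // becomes a no-op.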
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 | else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
}
| {
Ok(Executor::single_thread())
} | conditional_block |
main.rs | #![feature(test)]
#[macro_use] extern crate gfx;
extern crate gfx_window_glutin;
extern crate gfx_device_gl;
extern crate glutin;
extern crate rand;
extern crate failure;
#[macro_use] extern crate failure_derive;
extern crate image;
extern crate rusttype;
extern crate specs;
extern crate rayon;
#[macro_use] extern crate specs_derive;
extern crate num_integer;
#[macro_use] extern crate lazy_static;
extern crate serde;
extern crate serde_yaml;
extern crate cgmath;
#[macro_use] extern crate serde_derive;
#[cfg(test)]
extern crate test;
mod renderer;
mod comp;
mod input;
mod sys_control;
mod sys_health;
mod sys_phys;
mod sys_anim;
mod sys_lifetime;
mod sys_on_hit;
mod sys_pickup;
mod sys_death_drop;
mod sys_track_pos;
mod sys_match_anim;
mod sys_set_equipment;
mod vec;
mod ui;
mod camera;
mod math_util;
mod item;
mod inventory;
mod drop_tables;
mod asset_loader;
use comp::*;
use vec::*;
use specs::*;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use glutin::{GlRequest, GlContext};
use glutin::Api::OpenGl;
use std::time;
use std::thread;
use rand::SeedableRng;
use renderer::get_asset_by_name;
pub struct CollisionMeta {
/// This normal points outwards from entity B to entity A (and is also used
/// to resolve circ - circ collisions)
/// Will be normalised.
#[allow(dead_code)]
normal: Vec32,
}
/// Lists pairs of collisions.
pub struct Collisions(Vec<(Entity, Entity, CollisionMeta)>);
pub struct DeltaTime(pub f32);
/// Vertex buffer for game objects
pub struct GameVertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means alive characters have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Break out of the main loop for a quick exit
// Update window size if needed
if input_state.window_dimensions_need_update {
println!("Resizing window viewport");
renderer.update_window_size(&window);
}
}
// Update & paint the world
{
dispatcher.dispatch_seq(&mut world.res);
// Get the player position
let player_pos = world.read_storage::<Pos>().get(player).unwrap().clone();
let player_pos = [player_pos.pos.x, player_pos.z, player_pos.pos.y];
let mut ui_v_buf = world.write_resource::<UIVertexBuffer>();
let mut game_v_buf = world.write_resource::<GameVertexBuffer>();
let mut terrain_v_buf = world.write_resource::<TerrainVertexBuffer>();
let mut terrain_v_buf_needs_update =
world.write_resource::<TerrainVertexBufferNeedsUpdate>();
let camera = &world.read_resource::<camera::Camera>();
// Update buffers
renderer.update_buffer(&ui_v_buf.0, renderer::BufferType::UI);
renderer.update_buffer(&game_v_buf.0, renderer::BufferType::Game);
if terrain_v_buf_needs_update.0 {
renderer.update_buffer(&terrain_v_buf.0, renderer::BufferType::Terrain);
terrain_v_buf_needs_update.0 = false;
}
// Clear & render
renderer.clear();
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Terrain);
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Game);
renderer.clear_depth();
renderer.render_buffer(&camera, [0.0, 0.0, 0.0], renderer::BufferType::UI);
renderer.flush(&mut device);
window.swap_buffers().unwrap();
device.cleanup();
// Reset ECS state after rendering
// After painting, we need to clear the v_buf
ui_v_buf.0.size = 0;
game_v_buf.0.size = 0;
terrain_v_buf.0.size = 0;
// Clear collision list for next frame
let mut collisions = world.write_resource::<Collisions>();
collisions.0.clear();
let mut killed = world.write_resource::<KilledEntities>();
killed.0.clear();
}
// Actually delete all entities that need to be deleted
world.maintain();
// Calculate frame time
let elapsed = start.elapsed();
if fps_count_timer <= 0 {
println!("Time taken (millis): {:?}",
elapsed.as_secs() * 1000 + elapsed.subsec_millis() as u64);
fps_count_timer = 60;
}
fps_count_timer -= 1;
// Sleep to cap the frame rate at roughly 60fps. Vsync normally throttles the
// loop, but when the window isn't being rendered we'd otherwise spin and burn CPU.
if elapsed.subsec_millis() < 17 && elapsed.as_secs() == 0 {
thread::sleep(time::Duration::from_millis(17) - elapsed);
}
}
}
mod.rs | /// The name of the md5 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const MD5_NAME: &str = "MD5";
/// The name of the sha2-256 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomarts.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of OpenSSH.
///
/// SHA2-256: Since OpenSSH 6.8, this has been the default fingerprint option.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5,
SHA256,
SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
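// Sketch: how these digests are conventionally rendered. This module returns
// raw digest bytes; the formatting below is what ssh-keygen prints and is
// shown for illustration only.
//
// let raw = key.fingerprint(FingerprintHash::SHA256)?; // 32 raw bytes
// // SHA256/SHA512 are shown as "SHA256:" + unpadded base64 of the digest;
// // MD5 is shown as colon-separated lowercase hex pairs ("a1:b2:...").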
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// Public key is usually stored in the `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
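// Usage sketch (the key literal is a truncated placeholder, not a valid key):
//
// let key = PublicKey::from_keystr("ssh-ed25519 AAAAC3Nz... user@host")?;
// assert_eq!(key.keytype(), KeyType::ED25519);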
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// Key pair is the so-called "private key" which contains both public and private parts of an asymmetry key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
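// Usage sketch: loading a (possibly encrypted) private key. Reading the file
// is plain std; only from_keystr comes from this module.
//
// let pem = std::fs::read_to_string("id_ed25519")?;
// let kp = KeyPair::from_keystr(&pem, Some("my passphrase"))?;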
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should be `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
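// Usage sketch built only on methods defined in this module:
//
// let mut kp = KeyPair::generate(KeyType::ED25519, 0)?; // 0 selects the default 256 bits
// *kp.comment_mut() = "alice@example".to_string();
// let pub_line = kp.serialize_publickey()?; // OpenSSH ".pub" line
// let priv_pem = kp.serialize_openssh(None, Cipher::Null)?; // unencrypted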
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> {
stringify_pem_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key of the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
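// Sign/verify round trip using the trait impls above:
//
// let kp = KeyPair::generate(KeyType::RSA, 2048)?;
// let sig = kp.sign(b"hello")?;
// assert!(kp.clone_public_key()?.verify(b"hello", &sig)?);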
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data with a detached signature, returning `true` if the signature is valid
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b".o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
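// (bit 0 steps x: 1 = right, 0 = left; bit 1 steps y: 1 = down, 0 = up)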
for _ in 0..4 {
// Evaluate 2 bit, rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Assure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
.iter()
.map(|&c| AUGMENTATION_CHARS[c as usize] as char),
);
art.push_str("|\n");
}
// Output lower border.
art += &format!("+{:-^width$}+", hash, width = FLDSIZE_X);
Ok(art)
}
}
/// A trait for operations of a private key
pub trait PrivateParts: Key {
/// Sign the data with the private key, returning the signature
fn sign(&self, data: &[u8]) -> OsshResult<Vec<u8>>;
}
mod.rs | method.name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate a fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomarts.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of OpenSSH.
///
/// SHA2-256: Since OpenSSH 6.8, this has been the default fingerprint type.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5, | SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
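// A minimal sketch (not part of the original source) exercising the private
// `hash` helper above: the digest lengths follow directly from the
// algorithms themselves (MD5 = 16 bytes, SHA2-256 = 32, SHA2-512 = 64).
#[cfg(test)]
mod fingerprint_hash_sketch {
use super::FingerprintHash;

#[test]
fn digest_lengths_match_the_algorithms() {
let data = b"example data";
assert_eq!(FingerprintHash::MD5.hash(data).len(), 16);
assert_eq!(FingerprintHash::SHA256.hash(data).len(), 32);
assert_eq!(FingerprintHash::SHA512.hash(data).len(), 64);
}
}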
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// The public key is usually stored in a `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get a mutable reference to the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
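// A minimal sketch (not part of the original source) of the read-side flow
// for the `PublicKey` API above. The key string is whatever the caller loads
// from an `id_*.pub` file; no real key material is embedded here.
#[cfg(test)]
#[allow(dead_code)]
fn describe_pubkey_sketch(keystr: &str) -> OsshResult<String> {
let key = PublicKey::from_keystr(keystr)?;
let fp = key.fingerprint(FingerprintHash::SHA256)?;
Ok(format!(
"{:?} key, {} bits, {}-byte SHA2-256 fingerprint",
key.keytype(),
key.size(),
fp.len()
))
}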
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// A key pair is the so-called "private key", which contains both the public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supported file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # OpenSSH
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the newer format, supported since OpenSSH 6.5, which became the default format in OpenSSH 7.8.
/// Ed25519 keys can only be stored in this format.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, the default size for the key type is used:
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> {
stringify_pem_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get a mutable reference to the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key string for the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
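// A minimal sketch (not part of the original source): generating a fresh
// Ed25519 keypair and serializing both halves with the methods above.
// Passing `0` selects the default size for the chosen key type.
#[cfg(test)]
#[allow(dead_code)]
fn generate_ed25519_sketch() -> OsshResult<(String, String)> {
let mut pair = KeyPair::generate(KeyType::ED25519, 0)?;
*pair.comment_mut() = String::from("user@example");
// No passphrase: the private key is written unencrypted with a null cipher.
let private_key = pair.serialize_openssh(None, Cipher::Null)?;
let public_line = pair.serialize_publickey()?;
Ok((private_key, public_line))
}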
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
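// A minimal sketch (not part of the original source): a sign/verify round
// trip through the `PrivateParts` and `PublicParts` impls above, including
// the cloned public half.
#[cfg(test)]
mod sign_verify_sketch {
use super::*;

#[test]
fn roundtrip_through_both_halves() {
let pair = KeyPair::generate(KeyType::ED25519, 0).unwrap();
let msg = b"payload";
let sig = pair.sign(msg).unwrap();
assert!(pair.verify(msg, &sig).unwrap());
let public = pair.clone_public_key().unwrap();
assert!(public.verify(msg, &sig).unwrap());
}
}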
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data with a detached signature, returning `true` if the signature is valid
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b".o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
for _ in 0..4 {
// Evaluate 2 bits; the rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Ensure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
| SHA256, | random_line_split |
mod.rs | .name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate a fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomarts.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of OpenSSH.
///
/// SHA2-256: Since OpenSSH 6.8, this has been the default fingerprint type.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5,
SHA256,
SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// The public key is usually stored in a `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get a mutable reference to the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
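// A minimal sketch (not part of the original source): rendering the
// randomart box for a parsed public key via the `PublicParts` default
// method defined later in this module.
#[cfg(test)]
#[allow(dead_code)]
fn randomart_sketch(key: &PublicKey) -> OsshResult<()> {
let art = key.fingerprint_randomart(FingerprintHash::SHA256)?;
print!("{}", art);
Ok(())
}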
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn | (inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// A key pair is the so-called "private key", which contains both the public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supported file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # OpenSSH
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the newer format, supported since OpenSSH 6.5, which became the default format in OpenSSH 7.8.
/// Ed25519 keys can only be stored in this format.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, the default size for the key type is used:
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> {
stringify_pem_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get a mutable reference to the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key string for the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
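// A minimal sketch (not part of the original source): `clone_public_key`
// carries the keypair's comment along, so the rendered `.pub` line keeps
// its trailing comment.
#[cfg(test)]
#[allow(dead_code)]
fn public_half_sketch() -> OsshResult<String> {
let mut pair = KeyPair::generate(KeyType::RSA, 2048)?;
*pair.comment_mut() = String::from("ci@build-host");
let public = pair.clone_public_key()?;
debug_assert_eq!(public.comment(), "ci@build-host");
public.serialize()
}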
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data with a detached signature, returning `true` if the signature is valid
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b".o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
for _ in 0..4 {
// Evaluate 2 bits; the rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Ensure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
| from | identifier_name |
mod.rs | (Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turns into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument,...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determine how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Layout text from left to right.
/// - `{rtl}`: Layout text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically laid out in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or in some way bad looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyphs forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01` through `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
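// A minimal sketch (not part of the original source): `FontList` preserves
// the user's priority order, and `FontFamily::new` lowercases names, which
// is what makes later family lookup case-insensitive.
#[cfg(test)]
mod font_list_sketch {
use super::*;

#[test]
fn lowercased_priority_order() {
let list = FontList(vec![
FontFamily::new("Inria Serif"),
FontFamily::new("Noto Sans Arabic"),
]);
let names: Vec<&str> = list.0.iter().map(|f| f.as_str()).collect();
assert_eq!(names, ["inria serif", "noto sans arabic"]);
}
}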
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
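// A minimal sketch (not part of the original source) of how `TextSize`
// folding realizes relative `em` sizes; it assumes the crate's public
// `Length { abs, em }` layout and `Em::new`. With an outer size of 20pt,
// `1.5em` folds to 30pt, matching the `#text(1.5em)` doc example above.
#[cfg(test)]
mod text_size_fold_sketch {
use super::*;

#[test]
fn em_sizes_fold_against_the_outer_size() {
let relative = TextSize(Length { abs: Abs::zero(), em: Em::new(1.5) });
assert_eq!(relative.fold(Abs::pt(20.0)), Abs::pt(30.0));
}
}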
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(met | ngth) => length.resolve(styles),
}
}
}
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
BottomEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for BottomEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Baseline => Ok(VerticalFontMetric::Baseline),
Self::Descender => Ok(VerticalFontMetric::Descender),
_ => Err(()),
}
}
}
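// A minimal sketch (not part of the original source): `Bounds` deliberately
// has no `VerticalFontMetric` counterpart, which is what routes `resolve`
// into the bounding-box fallback branch in the impls above.
#[cfg(test)]
mod edge_metric_sketch {
use super::*;

#[test]
fn bounds_has_no_vertical_font_metric() {
let top: Result<VerticalFontMetric, ()> = TopEdgeMetric::Bounds.try_into();
let bottom: Result<VerticalFontMetric, ()> = BottomEdgeMetric::Bounds.try_into();
assert!(top.is_err());
assert!(bottom.is_err());
}
}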
/// The direction of text and inline objects in their line.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextDir(pub Smart<Dir>);
cast! {
TextDir,
self => self.0.into_value(),
v: Smart<Dir> => {
if v.map_or(false, |dir| dir.axis() == Axis::Y) {
bail!("text direction must be horizontal");
}
Self(v)
},
}
impl Resolve for TextDir {
type Output = Dir;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => Text | ric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(le | conditional_block |
mod.rs | #[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turn into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument,...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determines how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Lay out text from left to right.
/// - `{rtl}`: Lay out text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically laid out in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or otherwise poor-looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyph forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
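// Illustration of the special constructor above: `#text(size: 20pt)[big]`
// evaluates to the body `[big]` with a style map carrying `size: 20pt`
// attached; no `TextElem` node is created by the call itself.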
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
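// Sketch of the conversions this cast implies (hypothetical values): the
// string `"Arial"` casts to `FontList(vec![FontFamily("arial")])` via the
// lowercasing `FontFamily` constructor, an array casts element-wise, and a
// single-element list converts back to a plain string value rather than an
// array.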
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
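// Worked example of the folding above: with an outer size of 11pt, a size of
// `1.5em` folds to `1.5 * 11pt + 0pt = 16.5pt`, while `0.5em + 2pt` folds to
// `0.5 * 11pt + 2pt = 7.5pt`.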
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(length) => length.resolve(styles),
}
}
}
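// Resolution sketch: a metric edge such as `cap-height` is read from the
// font's vertical metrics, while `bounds` has no such metric and falls back
// to the bounding box's `y_max`; without a bounding box, the edge resolves
// to the default (zero) offset.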
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
} | self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for BottomEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Baseline => Ok(VerticalFontMetric::Baseline),
Self::Descender => Ok(VerticalFontMetric::Descender),
_ => Err(()),
}
}
}
/// The direction of text and inline objects in their line.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextDir(pub Smart<Dir>);
cast! {
TextDir,
self => self.0.into_value(),
v: Smart<Dir> => {
if v.map_or(false, |dir| dir.axis() == Axis::Y) {
bail!("text direction must be horizontal");
}
Self(v)
},
}
impl Resolve for TextDir {
type Output = Dir;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => TextElem::lang_ | }
}
cast! {
BottomEdge, | random_line_split |
mod.rs | (global: &mut Scope) {
global.define("text", TextElem::func());
global.define("linebreak", LinebreakElem::func());
global.define("smartquote", SmartQuoteElem::func());
global.define("strong", StrongElem::func());
global.define("emph", EmphElem::func());
global.define("lower", lower_func());
global.define("upper", upper_func());
global.define("smallcaps", smallcaps_func());
global.define("sub", SubElem::func());
global.define("super", SuperElem::func());
global.define("underline", UnderlineElem::func());
global.define("strike", StrikeElem::func());
global.define("overline", OverlineElem::func());
global.define("raw", RawElem::func());
global.define("lorem", lorem_func());
}
/// Customizes the look and layout of text in a variety of ways.
///
/// This function is used frequently, both with set rules and directly. While
/// the set rule is often the simpler choice, calling the `text` function
/// directly can be useful when passing text as an argument to another function.
///
/// ## Example { #example }
/// ```example
/// #set text(18pt)
/// With a set rule.
///
/// #emph(text(blue)[
/// With a function call.
/// ])
/// ```
///
/// Display: Text
/// Category: text
#[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turn into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument,...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determines how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Lay out text from left to right.
/// - `{rtl}`: Lay out text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically laid out in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or otherwise poor-looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyph forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
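// Folding composes across nested set rules: starting from a resolved 10pt,
// an inner `2em` folds to `2 * 10pt = 20pt`, and a further `1.5em` nested
// inside folds against that result to `1.5 * 20pt = 30pt`.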
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
}
}
}
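// Mirror of `TopEdge::resolve` above: metric edges read the font's vertical
// metrics, while `bounds` falls back to the bounding box's `y_min` (the
// lowest point of the glyph outline), defaulting to zero when no bounding
// box is supplied.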
cast! {
BottomEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
| define | identifier_name |
|
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type")] macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
pub use crate::expect_eq;
pub use crate::expect_false; | pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed
/// to C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../`
// move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
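// Worked example: `gen/foo/../../../../real/path.rs` is walked in reverse,
// so `path.rs` and `real` are kept, the four `..` components consume `foo`,
// `gen`, and (running off the start of the path) the two build-directory
// levels, leaving `real/path.rs` after the final join.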
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros; | random_line_split |
|
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude:::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type") macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
pub use crate::expect_eq;
pub use crate::expect_false;
pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed
/// to C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct | {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
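// Sketch of the intended use (hypothetical test): a #[gtest] test body may
// return `Result<(), E>`; `Ok(())` yields no failure message, while `Err(e)`
// is rendered as "Test returned error: {e}", which the generated wrapper can
// then report as a Gtest failure.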
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../`
// move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros;
| OpaqueTestingTest | identifier_name |
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type")] macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
pub use crate::expect_eq;
pub use crate::expect_false;
pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there were a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> |
}
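// Sketch (added for illustration; `might_fail` is a hypothetical helper, not
// part of this crate): with the two impls above a test body may return either
// `()` or a `Result<(), E>`, and a returned `Err(e)` surfaces as the failure
// message "Test returned error: {e}" instead of panicking:
//
//     fn body() -> std::result::Result<(), std::io::Error> {
//         might_fail()
//     }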
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the C++ function of the same name.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
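// Usage sketch (added for illustration; the message text is made up). Callers
// report a non-fatal Gtest failure at a given source location, and the
// `expect_*` macros in the prelude are expected to funnel their mismatch
// messages through this helper:
//
//     add_failure_at(file!(), line!(), "expected 1 == 2");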
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory, but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
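// A small test sketch (added for illustration): the input is the hypothetical
// generated-crate path from the comment above, where the `../` components
// cancel the build-directory prefix and leave the source-tree-relative path.
#[cfg(test)]
mod canonical_file_path_tests {
#[test]
fn strips_updir_pairs() {
let canonical = super::make_canonical_file_path("gen/foo/../../../../real/path.rs");
let expected = std::path::Path::new("real").join("path.rs").to_string_lossy().to_string();
assert_eq!(canonical, expected);
}
}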
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros;
| {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
} | identifier_body |
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One on the front (PDF417), and one on the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf417(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
}
fn decode_pdf417(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF417".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
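// A worked test sketch (added for illustration; "B4LQ" is a made-up field
// value, not real card data, and the chrono/time APIs used are the ones this
// file already imports). Date fields are base-32 day counts from 1000-01-01:
// B=11, 4=4, L=21, Q=26, so "B4LQ" is
// 11*32^3 + 4*32^2 + 21*32 + 26 = 365_242 days, which is exactly 2000-01-01.
#[cfg(test)]
mod date_field_tests {
use super::*;
#[test]
fn base32_date_expansion() {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let days = base32.decimal("B4LQ".to_string());
assert_eq!(days, 365_242);
assert_eq!(Utc.ymd(1000, 1, 1) + Duration::days(days as i64), Utc.ymd(2000, 1, 1));
}
}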
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap()))); | //Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
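// Usage test sketch (added for illustration; "1000ABCS0001234AF7" is a
// synthetic 18-character value built to match the field layout above, not
// data from a real card).
#[cfg(test)]
mod code39_tests {
#[test]
fn decodes_synthetic_barcode() {
let decoded = super::decode_code39("1000ABCS0001234AF7".to_string());
assert!(decoded.contains("Personal Designator Type: Social Security Number (SSN)"));
assert!(decoded.contains("Personnel Category Code: Active Duty member"));
assert!(decoded.contains("Branch: USAF"));
}
}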
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
} | random_line_split |
|
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One on the front (PDF417), and one on the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf417(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
}
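// Dispatch note (added for illustration): `decode` selects a decoder purely by
// input length. An 18-character payload is the back-side Code39 barcode, an
// 88- or 89-character payload is the front-side PDF417 barcode (89 when the
// optional middle initial is present), and any other length is rejected.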
fn decode_pdf417(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF417".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn | (data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
}
| decode_code39 | identifier_name |
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One on the front (PDF417), and one on the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String |
fn decode_pdf417(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF417".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
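// Field-width check (added for illustration): the PDF417 fields read above sum
// to 1 (version) + 6 (PDI) + 1 (PDT) + 7 (EDIPI) + 20 (first name) +
// 26 (last name) + 4 (birth date) + 1 (category) + 1 (branch) + 2 (PECT) +
// 6 (rank) + 2 (pay plan) + 2 (pay grade) + 4 (issue date) + 4 (expiration) +
// 1 (instance) = 88 characters, plus an optional 89th middle-initial
// character, matching the lengths accepted by `decode`.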
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
}
| {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf417(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
} | identifier_body |
mod.rs | use crate::domain;
use crate::ops;
use crate::prelude::*;
use petgraph;
use std::collections::{HashMap, HashSet};
use std::ops::{Deref, DerefMut};
mod process;
#[cfg(test)]
pub(crate) use self::process::materialize;
pub mod special;
mod ntype;
pub use self::ntype::NodeType; // crate viz for tests
mod debug;
// NOTE(jfrg): the migration code should probably move into the dataflow crate...
// it is the reason why so much stuff here is pub
#[derive(Clone, Serialize, Deserialize)]
pub struct Node {
name: String,
index: Option<IndexPair>,
domain: Option<domain::Index>,
fields: Vec<String>,
parents: Vec<LocalNodeIndex>,
children: Vec<LocalNodeIndex>,
inner: NodeType,
taken: bool,
pub purge: bool,
sharded_by: Sharding,
}
// constructors
impl Node {
pub fn new<S1, FS, S2, NT>(name: S1, fields: FS, inner: NT) -> Node
where
S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>,
NT: Into<NodeType>,
{
Node {
name: name.to_string(),
index: None,
domain: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
parents: Vec::new(),
children: Vec::new(),
inner: inner.into(),
taken: false,
purge: false,
sharded_by: Sharding::None,
}
}
pub fn mirror<NT: Into<NodeType>>(&self, n: NT) -> Node {
Self::new(&*self.name, &self.fields, n)
}
pub fn named_mirror<NT: Into<NodeType>>(&self, n: NT, name: String) -> Node {
Self::new(name, &self.fields, n)
}
}
#[must_use]
pub struct DanglingDomainNode(Node);
impl DanglingDomainNode {
pub fn finalize(self, graph: &Graph) -> Node {
let mut n = self.0;
let ni = n.global_addr();
let dm = n.domain();
n.children = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Outgoing)
.filter(|&c| graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n.parents = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Incoming)
.filter(|&c| !graph[c].is_source() && graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n
}
}
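// Lifecycle sketch (hypothetical caller code, added for illustration): the
// graph keeps a `taken` placeholder while a domain owns the live node, and
// `finalize` rewires parent/child links to domain-local addresses once the
// whole graph is known:
//
//     let dangling = graph[ni].take();       // graph copy becomes `taken`
//     let node = dangling.finalize(&graph);  // resolves local parents/children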
// external parts of Ingredient
impl Node {
/// Called when a node is first connected to the graph.
///
/// All its ancestors are present, but this node and its children may not have been connected
/// yet.
pub fn on_connected(&mut self, graph: &Graph) {
Ingredient::on_connected(&mut **self, graph)
}
pub fn on_commit(&mut self, remap: &HashMap<NodeIndex, IndexPair>) {
// this is *only* overwritten for these asserts.
assert!(!self.taken);
if let NodeType::Internal(ref mut i) = self.inner {
i.on_commit(self.index.unwrap().as_global(), remap)
}
}
/// May return a set of nodes such that *one* of the given ancestors *must* be the one to be
/// replayed if this node's state is to be initialized.
pub fn must_replay_among(&self) -> Option<HashSet<NodeIndex>> {
Ingredient::must_replay_among(&**self)
}
/// Translate a column in this ingredient into the corresponding column(s) in
/// parent ingredients. None for the column means that the parent doesn't
/// have an associated column. Similar to resolve, but does not depend on
/// materialization, and returns results even for computed columns.
pub fn parent_columns(&self, column: usize) -> Vec<(NodeIndex, Option<usize>)> {
Ingredient::parent_columns(&**self, column)
}
/// Resolve where the given field originates from. If the view is materialized, or the value is
/// otherwise created by this view, None should be returned.
pub fn resolve(&self, i: usize) -> Option<Vec<(NodeIndex, usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
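/// Remove `child` from this node's children if present, returning whether it
/// was found. `swap_remove` keeps removal O(1) at the cost of not preserving
/// the order of the remaining children.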
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
}
pub fn has_domain(&self) -> bool {
self.domain.is_some()
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!(
"asked for unset domain for {:?} {}",
self,
self.global_addr().index()
);
}
}
}
pub fn local_addr(&self) -> LocalNodeIndex {
match self.index {
Some(idx) if idx.has_local() => *idx,
Some(_) | None => unreachable!("asked for unset addr for {:?}", self),
}
}
pub fn global_addr(&self) -> NodeIndex {
match self.index {
Some(ref index) => index.as_global(),
None => {
unreachable!("asked for unset index for {:?}", self);
}
}
}
pub fn get_base_mut(&mut self) -> Option<&mut special::Base> {
if let NodeType::Base(ref mut b) = self.inner {
Some(b)
} else {
None
}
}
pub fn add_to(&mut self, domain: domain::Index) {
assert_eq!(self.domain, None);
assert!(!self.is_dropped());
self.domain = Some(domain);
}
pub fn set_finalized_addr(&mut self, addr: IndexPair) {
self.index = Some(addr);
}
}
// is this or that?
impl Node {
pub fn is_dropped(&self) -> bool {
if let NodeType::Dropped = self.inner {
true
} else {
false
}
}
pub fn is_egress(&self) -> bool {
if let NodeType::Egress { .. } = self.inner {
true
} else {
false
}
}
pub fn is_reader(&self) -> bool {
if let NodeType::Reader { .. } = self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let NodeType::Ingress = self.inner {
true
} else {
false
}
}
pub fn is_sender(&self) -> bool {
match self.inner {
NodeType::Egress { .. } | NodeType::Sharder(..) => true,
_ => false,
}
}
pub fn is_internal(&self) -> bool {
if let NodeType::Internal(..) = self.inner {
true
} else {
false
}
}
pub fn is_source(&self) -> bool {
if let NodeType::Source { .. } = self.inner {
true
} else {
false
}
}
pub fn is_sharder(&self) -> bool {
if let NodeType::Sharder { .. } = self.inner {
true
} else {
false
}
}
pub fn is_base(&sel | ool {
if let NodeType::Base(..) = self.inner {
true
} else {
false
}
}
pub fn is_union(&self) -> bool {
if let NodeType::Internal(NodeOperator::Union(_)) = self.inner {
true
} else {
false
}
}
pub fn is_shard_merger(&self) -> bool {
if let NodeType::Internal(NodeOperator::Union(ref u)) = self.inner {
u.is_shard_merger()
} else {
false
}
}
}
| f) -> b | identifier_name |
mod.rs | use crate::domain;
use crate::ops;
use crate::prelude::*;
use petgraph;
use std::collections::{HashMap, HashSet};
use std::ops::{Deref, DerefMut};
mod process;
#[cfg(test)]
pub(crate) use self::process::materialize;
pub mod special;
mod ntype;
pub use self::ntype::NodeType; // crate viz for tests
mod debug;
// NOTE(jfrg): the migration code should probably move into the dataflow crate...
// it is the reason why so much stuff here is pub
#[derive(Clone, Serialize, Deserialize)]
pub struct Node {
name: String,
index: Option<IndexPair>,
domain: Option<domain::Index>,
fields: Vec<String>,
parents: Vec<LocalNodeIndex>,
children: Vec<LocalNodeIndex>,
inner: NodeType,
taken: bool,
pub purge: bool,
sharded_by: Sharding,
}
// constructors
impl Node {
pub fn new<S1, FS, S2, NT>(name: S1, fields: FS, inner: NT) -> Node
where
S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>,
NT: Into<NodeType>,
{
Node {
name: name.to_string(),
index: None,
domain: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
parents: Vec::new(),
children: Vec::new(),
inner: inner.into(),
taken: false,
purge: false,
sharded_by: Sharding::None,
}
}
pub fn mirror<NT: Into<NodeType>>(&self, n: NT) -> Node {
Self::new(&*self.name, &self.fields, n)
}
pub fn named_mirror<NT: Into<NodeType>>(&self, n: NT, name: String) -> Node {
Self::new(name, &self.fields, n)
}
}
#[must_use]
pub struct DanglingDomainNode(Node);
impl DanglingDomainNode {
pub fn finalize(self, graph: &Graph) -> Node {
let mut n = self.0;
let ni = n.global_addr();
let dm = n.domain();
n.children = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Outgoing)
.filter(|&c| graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n.parents = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Incoming)
.filter(|&c| !graph[c].is_source() && graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n
}
}
// external parts of Ingredient
impl Node {
/// Called when a node is first connected to the graph.
///
/// All its ancestors are present, but this node and its children may not have been connected
/// yet.
pub fn on_connected(&mut self, graph: &Graph) {
Ingredient::on_connected(&mut **self, graph)
}
pub fn on_commit(&mut self, remap: &HashMap<NodeIndex, IndexPair>) {
// this is *only* overwritten for these asserts.
assert!(!self.taken);
if let NodeType::Internal(ref mut i) = self.inner {
i.on_commit(self.index.unwrap().as_global(), remap)
}
}
/// May return a set of nodes such that *one* of the given ancestors *must* be the one to be
/// replayed if this node's state is to be initialized.
pub fn must_replay_among(&self) -> Option<HashSet<NodeIndex>> {
Ingredient::must_replay_among(&**self)
}
/// Translate a column in this ingredient into the corresponding column(s) in
/// parent ingredients. None for the column means that the parent doesn't
/// have an associated column. Similar to resolve, but does not depend on
/// materialization, and returns results even for computed columns.
pub fn parent_columns(&self, column: usize) -> Vec<(NodeIndex, Option<usize>)> {
Ingredient::parent_columns(&**self, column)
}
/// Resolve where the given field originates from. If the view is materialized, or the value is
/// otherwise created by this view, None should be returned.
pub fn resolve(&self, i: usize) -> Option<Vec<(NodeIndex, usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
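// Note that Deref/DerefMut above hit `unreachable!()` for any non-internal
// node, so callers are expected to check `is_internal()` before dereferencing.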
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
}
pub fn has_domain(&self) -> bool {
self.domain.is_some()
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!(
"asked for unset domain for {:?} {}",
self,
self.global_addr().index()
);
}
}
}
pub fn local_addr(&self) -> LocalNodeIndex {
match self.index {
Some(idx) if idx.has_local() => *idx,
Some(_) | None => unreachable!("asked for unset addr for {:?}", self),
}
}
pub fn global_addr(&self) -> NodeIndex {
match self.index {
Some(ref index) => index.as_global(),
None => {
unreachable!("asked for unset index for {:?}", self);
}
}
}
pub fn get_base_mut(&mut self) -> Option<&mut special::Base> {
if let NodeType::Base(ref mut b) = self.inner {
Some(b)
} else {
None
}
}
pub fn add_to(&mut self, domain: domain::Index) {
assert_eq!(self.domain, None);
assert!(!self.is_dropped());
self.domain = Some(domain);
}
pub fn set_finalized_addr(&mut self, addr: IndexPair) {
self.index = Some(addr);
}
}
// is this or that?
impl Node {
pub fn is_dropped(&self) -> bool {
if let NodeType::Dropped = self.inner {
true
} else {
false
}
}
pub fn is_egress(&self) -> bool {
if let NodeType::Egress { .. } = self.inner {
true
} else {
false
}
}
pub fn is_reader(&self) -> bool {
if let NodeType::Reader { .. } = self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let NodeType::Ingress = self.inner {
true
} else {
false
}
}
pub fn is_sender(&self) -> bool {
match self.inner {
NodeType::Egress { .. } | NodeType::Sharder(..) => true,
_ => false,
}
}
pub fn is_internal(&self) -> bool {
if let NodeType::Internal(..) = self.inner {
true
} else {
false
}
}
pub fn is_source(&self) -> bool {
if let NodeType::Source { .. } = self.inner {
true
} else {
false
}
}
pub fn is_sharder(&self) -> bool {
if let NodeType::Sharder { .. } = self.inner {
true
} else {
false
}
}
pub fn is_base(&self) -> bool {
if let NodeType::Base(..) = self.inner {
true
} else {
false
}
}
pub fn is_union(&self) -> bool {
if | is_shard_merger(&self) -> bool {
if let NodeType::Internal(NodeOperator::Union(ref u)) = self.inner {
u.is_shard_merger()
} else {
false
}
}
}
| let NodeType::Internal(NodeOperator::Union(_)) = self.inner {
true
} else {
false
}
}
pub fn | identifier_body |
mod.rs | use crate::domain;
use crate::ops;
use crate::prelude::*;
use petgraph;
use std::collections::{HashMap, HashSet};
use std::ops::{Deref, DerefMut};
mod process;
#[cfg(test)]
pub(crate) use self::process::materialize;
pub mod special;
mod ntype;
pub use self::ntype::NodeType; // crate viz for tests
mod debug;
// NOTE(jfrg): the migration code should probably move into the dataflow crate...
// it is the reason why so much stuff here is pub
#[derive(Clone, Serialize, Deserialize)]
pub struct Node {
name: String,
index: Option<IndexPair>,
domain: Option<domain::Index>,
fields: Vec<String>,
parents: Vec<LocalNodeIndex>,
children: Vec<LocalNodeIndex>,
inner: NodeType,
taken: bool,
pub purge: bool,
sharded_by: Sharding,
}
// constructors
impl Node {
pub fn new<S1, FS, S2, NT>(name: S1, fields: FS, inner: NT) -> Node
where
S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>,
NT: Into<NodeType>,
{
Node {
name: name.to_string(), | children: Vec::new(),
inner: inner.into(),
taken: false,
purge: false,
sharded_by: Sharding::None,
}
}
pub fn mirror<NT: Into<NodeType>>(&self, n: NT) -> Node {
Self::new(&*self.name, &self.fields, n)
}
pub fn named_mirror<NT: Into<NodeType>>(&self, n: NT, name: String) -> Node {
Self::new(name, &self.fields, n)
}
}
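// A minimal construction sketch (hypothetical values; `op` stands for some
// `ops::NodeOperator` that converts into `NodeType`):
//
//     let n = Node::new("votes", &["id", "user"], op);
//     assert_eq!(n.name(), "votes");
//     assert_eq!(n.fields().len(), 2);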
#[must_use]
pub struct DanglingDomainNode(Node);
impl DanglingDomainNode {
pub fn finalize(self, graph: &Graph) -> Node {
let mut n = self.0;
let ni = n.global_addr();
let dm = n.domain();
n.children = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Outgoing)
.filter(|&c| graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n.parents = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Incoming)
.filter(|&c| !graph[c].is_source() && graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n
}
}
// external parts of Ingredient
impl Node {
/// Called when a node is first connected to the graph.
///
/// All its ancestors are present, but this node and its children may not have been connected
/// yet.
pub fn on_connected(&mut self, graph: &Graph) {
Ingredient::on_connected(&mut **self, graph)
}
pub fn on_commit(&mut self, remap: &HashMap<NodeIndex, IndexPair>) {
// this is *only* overwritten for these asserts.
assert!(!self.taken);
if let NodeType::Internal(ref mut i) = self.inner {
i.on_commit(self.index.unwrap().as_global(), remap)
}
}
/// May return a set of nodes such that *one* of the given ancestors *must* be the one to be
/// replayed if this node's state is to be initialized.
pub fn must_replay_among(&self) -> Option<HashSet<NodeIndex>> {
Ingredient::must_replay_among(&**self)
}
/// Translate a column in this ingredient into the corresponding column(s) in
/// parent ingredients. None for the column means that the parent doesn't
/// have an associated column. Similar to resolve, but does not depend on
/// materialization, and returns results even for computed columns.
pub fn parent_columns(&self, column: usize) -> Vec<(NodeIndex, Option<usize>)> {
Ingredient::parent_columns(&**self, column)
}
/// Resolve where the given field originates from. If the view is materialized, or the value is
/// otherwise created by this view, None should be returned.
pub fn resolve(&self, i: usize) -> Option<Vec<(NodeIndex, usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
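// `swap_remove` above is O(1) but does not preserve ordering; presumably the
// order of `children` carries no meaning here, only membership.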
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
}
pub fn has_domain(&self) -> bool {
self.domain.is_some()
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!(
"asked for unset domain for {:?} {}",
self,
self.global_addr().index()
);
}
}
}
pub fn local_addr(&self) -> LocalNodeIndex {
match self.index {
Some(idx) if idx.has_local() => *idx,
Some(_) | None => unreachable!("asked for unset addr for {:?}", self),
}
}
pub fn global_addr(&self) -> NodeIndex {
match self.index {
Some(ref index) => index.as_global(),
None => {
unreachable!("asked for unset index for {:?}", self);
}
}
}
pub fn get_base_mut(&mut self) -> Option<&mut special::Base> {
if let NodeType::Base(ref mut b) = self.inner {
Some(b)
} else {
None
}
}
pub fn add_to(&mut self, domain: domain::Index) {
assert_eq!(self.domain, None);
assert!(!self.is_dropped());
self.domain = Some(domain);
}
pub fn set_finalized_addr(&mut self, addr: IndexPair) {
self.index = Some(addr);
}
}
// is this or that?
impl Node {
pub fn is_dropped(&self) -> bool {
if let NodeType::Dropped = self.inner {
true
} else {
false
}
}
pub fn is_egress(&self) -> bool {
if let NodeType::Egress { .. } = self.inner {
true
} else {
false
}
}
pub fn is_reader(&self) -> bool {
if let NodeType::Reader { .. } = self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let NodeType::Ingress = self.inner {
true
} else {
false
}
}
pub fn is_sender(&self) -> bool {
match self.inner {
NodeType::Egress { .. } | NodeType::Sharder(..) => true,
_ => false,
}
}
pub fn is_internal(&self) -> bool {
if let NodeType::Internal(..) = self.inner {
true
} else {
false
}
}
pub fn is_source(&self) -> bool {
if let NodeType::Source { .. } = self.inner {
true
} else {
false
}
}
pub fn is_sharder(&self) -> bool {
if let NodeType::Sharder { .. } = self.inner {
true
} else {
false
}
}
pub fn is_base(&self) -> bool {
if let NodeType::Base(..) = self.inner {
true
} else {
false
}
}
pub fn is_union(&self) -> bool {
if let NodeType::Internal(NodeOperator::Union(_)) = self.inner {
true
} else {
false
}
}
pub fn is_shard_merger(&self) -> bool {
if let NodeType::Internal(NodeOperator::Union(ref u)) = self.inner {
u.is_shard_merger()
} else {
false
}
}
} | index: None,
domain: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
parents: Vec::new(), | random_line_split |
gdb_stub.rs | , &mut self.gba);
if self.run_state == RunState::Running {
let deadline = Instant::now() + Duration::from_millis(15);
while Instant::now() < deadline {
self.step_gba();
if let Some(stop_reason) = self.bus_snooper.stop_reason.take() {
note!(GDB, "Stopped in debugger due to {:?}", stop_reason);
self.run_state = RunState::Paused;
self.send(&stop_reason.to_command())?;
break;
}
}
} else {
thread::sleep(Duration::from_millis(1));
}
}
Ok(())
}
pub fn update(&mut self) -> GResult {
if self.listener.is_none() {
return Ok(());
}
let listener = self.listener.as_mut().unwrap();
if self.stream.is_none() {
let (stream, addr) = if let Some(t) = transpose_would_block(listener.accept())? {
t
} else {
return Ok(());
};
note!(GDB, "TcpListener accepted a connection from {}", addr);
stream.set_nonblocking(!self.blocking)?;
self.no_ack_mode = false;
self.stream = Some(stream);
}
// Unwrapping because we ensured it's Some above
let stream = self.stream.as_mut().unwrap();
let mut bytes = [0u8; 1200];
let mut msg: &[u8];
if let Some(amount) = transpose_would_block(stream.read(&mut bytes[..]))? {
if amount == 0 {
trace!(GDB, "Received 0 bytes, closing TcpStream..");
self.stream = None;
return Ok(());
} else {
let ascii_string = bytes_as_ascii(&bytes[..amount]);
trace!(GDB, "Received {} bytes: {:?}", amount, ascii_string);
msg = &bytes[..amount];
}
} else {
return Ok(());
}
while !msg.is_empty() {
let prev = msg;
self.parse_message(&mut msg)?;
// parse_message() must adjust `msg` to exclude the input it consumed.
assert_ne!(prev, msg);
}
Ok(())
}
fn parse_message(&mut self, msg: &mut &[u8]) -> GResult {
match (*msg)[0] {
b'+' => { // ack
*msg = &(*msg)[1..];
return Ok(());
}
b'-' => { // nak
*msg = &(*msg)[1..];
return Ok(());
}
b'$' => {
// Continue on to process this command
}
0x03 => { // Enter debugger
*msg = &(*msg)[1..];
self.run_state = RunState::Paused;
return self.send_fmt(format_args!("S{:02}", SIGINT));
}
first => {
// Skip this character, try parsing from the next character onwards
*msg = &(*msg)[1..];
warn!(GDB, "packet error; first byte = '{:02X}'", first);
return self.nak();
}
}
if !msg.contains(&b'#') {
trace!(GDB, "request was missing '#' character");
return self.nak();
}
let (message_body, mut their_checksum_str) = split_at(&msg[1..], b'#')?;
if their_checksum_str.len() < 2 {
trace!(GDB, "request had a checksum of less than 2 digits");
return self.nak();
}
// Cut the checksum off at 2 characters, any input left after that might be another request.
*msg = &their_checksum_str[2..];
their_checksum_str = &their_checksum_str[..2];
let our_checksum = checksum(message_body);
let their_checksum = hex_to_int(their_checksum_str)?;
if our_checksum != their_checksum {
warn!(GDB, "incorrect checksum: our_checksum = {}, their_checksum = {}", our_checksum, their_checksum);
return self.nak();
}
// The input is syntactically well-formed, we'll ack it now, then we can respond with
// an empty response if we don't actually understand the command we received.
self.ack()?;
let message_type = message_body[0];
let message_body = &message_body[1..];
match message_type {
b'?' => {
// Let's say we halted due to SIGINT
self.send_fmt(format_args!("S{:02}", SIGINT))?;
}
b'c' => {
self.do_continue(message_body)?;
}
b'D' => {
self.process_detach_command()?;
}
b'g' => {
self.read_gprs()?;
}
b'G' => {
self.write_gprs(message_body)?;
}
b'H' => {
// Sets the thread to use for subsequent invocations of a particular command.
// We only have 1 thread, so acknowledge and do nothing.
self.send(b"OK")?;
}
b'm' => {
self.read_memory(message_body)?;
}
b'M' => {
self.write_memory(message_body)?;
}
b'p' => {
self.read_gpr(message_body)?;
}
b'P' => {
self.write_gpr(message_body)?;
}
b'q' => {
self.process_qread_command(message_body)?;
}
b'Q' => {
self.process_qwrite_command(message_body)?;
}
b's' => {
self.do_step(message_body)?;
}
b'z' => {
self.process_z_command(message_body, false)?;
}
b'Z' => {
self.process_z_command(message_body, true)?;
}
_ => {
self.unrecognised_command()?;
}
}
Ok(())
}
fn process_qread_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"fThreadInfo" => {
// First thread in list: thread ID 1
self.send(b"m1")
}
b"sThreadInfo" => {
// End of list, thread ID 1 is the only thread
self.send(b"l")
}
b"C" => {
// The current thread is thread 1, we only have 1 thread..
self.send(b"QC1")
}
b"Attached" => {
// We, the GDB server, are always already attached to a process
self.send(b"1")
}
b"HostInfo" => {
const MACH_O_ARM: u32 = 12;
const MACH_O_ARM_V4T: u32 = 5;
self.send_fmt(format_args!("cputype:{};cpusubtype:{};ostype:none;vendor:none;endian:little;ptrsize:4;", MACH_O_ARM, MACH_O_ARM_V4T))
}
_ => {
if let Some(tail) = strip_prefix(msg, b"Supported:") {
self.process_qsupported_command(tail)
} else {
self.unrecognised_command()
}
}
}
}
fn process_qsupported_command(&mut self, msg: &[u8]) -> GResult {
let mut have_capabilities = Vec::new();
for requested_capability in msg.split(|&b| b == b';' || b == b',') {
match requested_capability {
b"swbreak+" | b"hwbreak+" => {
have_capabilities.push(requested_capability);
}
b"arm" => {
have_capabilities.push(b"arm+");
}
// TODO: Support "vContSupported+"?
_ => {}
}
}
let capability_string = have_capabilities.join(&b';');
self.send(&capability_string)
}
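// e.g. a client request of `qSupported:swbreak+;hwbreak+;arm` is answered
// with `swbreak+;hwbreak+;arm+` by the matching above.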
fn process_qwrite_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"StartNoAckMode" => {
self.no_ack_mode = true;
self.send(b"OK")
}
_ => {
self.unrecognised_command()
}
}
}
fn read_gprs(&mut self) -> GResult {
let mut reg_string = Vec::with_capacity(16 * 8);
for reg in self.gba.arm.regs[..REG_PC].iter() {
reg_string.write(&int_to_hex_le(*reg))?;
}
reg_string.write(&int_to_hex_le(self.gba.arm.current_pc()))?;
self.send(&reg_string)
}
fn write_gprs(&mut self, msg: &[u8]) -> GResult {
for (i, value) in msg.chunks_exact(8).map(hex_to_int_le).enumerate() {
self.gba.arm.set_reg(i, value?);
}
self.send(b"OK")
}
fn read_gpr(&mut self, msg: &[u8]) -> GResult {
let reg_index: usize = hex_to_int(msg)?;
let reg = if reg_index == 25 {
self.gba.arm.cpsr.into()
} else if reg_index == REG_PC {
self.gba.arm.current_pc()
} else if reg_index < 16 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind!= 2 && kind!= 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream, we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
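// Framing sketch: the payload `OK` goes out as `$OK#9A`; the two trailing hex
// digits are the modulo-256 sum of the payload bytes
// (b'O' + b'K' = 0x4F + 0x4B = 0x9A).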
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
} | WriteWatchpoint(u32),
AccessWatchpoint(u32),
Breakpoint(u32),
Step,
}
impl StopReason {
fn to_command(&self) -> Vec<u8> {
let mut result = Vec::new();
match self {
StopReason::ReadWatchpoint(addr) => write!(result, "T{:02}rwatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::WriteWatchpoint(addr) => write!(result, "T{:02}watch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::AccessWatchpoint(addr) => write!(result, "T{:02}awatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::Breakpoint(_) => write!(result, "T{:02}hwbreak:", SIGTRAP),
|
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32), | random_line_split |
gdb_stub.rs | note!(GDB, "TcpListener accepted a connection from {}", addr);
stream.set_nonblocking(!self.blocking)?;
self.no_ack_mode = false;
self.stream = Some(stream);
}
// Unwrapping because we ensured it's Some above
let stream = self.stream.as_mut().unwrap();
let mut bytes = [0u8; 1200];
let mut msg: &[u8];
if let Some(amount) = transpose_would_block(stream.read(&mut bytes[..]))? {
if amount == 0 {
trace!(GDB, "Received 0 bytes, closing TcpStream..");
self.stream = None;
return Ok(());
} else {
let ascii_string = bytes_as_ascii(&bytes[..amount]);
trace!(GDB, "Received {} bytes: {:?}", amount, ascii_string);
msg = &bytes[..amount];
}
} else {
return Ok(());
}
while !msg.is_empty() {
let prev = msg;
self.parse_message(&mut msg)?;
// parse_message() must adjust `msg` to exclude the input it consumed.
assert_ne!(prev, msg);
}
Ok(())
}
fn parse_message(&mut self, msg: &mut &[u8]) -> GResult {
match (*msg)[0] {
b'+' => { // ack
*msg = &(*msg)[1..];
return Ok(());
}
b'-' => { // nak
*msg = &(*msg)[1..];
return Ok(());
}
b'$' => {
// Continue on to process this command
}
0x03 => { // Enter debugger
*msg = &(*msg)[1..];
self.run_state = RunState::Paused;
return self.send_fmt(format_args!("S{:02}", SIGINT));
}
first => {
// Skip this character, try parsing from the next character onwards
*msg = &(*msg)[1..];
warn!(GDB, "packet error; first byte = '{:02X}'", first);
return self.nak();
}
}
if !msg.contains(&b'#') {
trace!(GDB, "request was missing '#' character");
return self.nak();
}
let (message_body, mut their_checksum_str) = split_at(&msg[1..], b'#')?;
if their_checksum_str.len() < 2 {
trace!(GDB, "request had a checksum of less than 2 digits");
return self.nak();
}
// Cut the checksum off at 2 characters, any input left after that might be another request.
*msg = &their_checksum_str[2..];
their_checksum_str = &their_checksum_str[..2];
let our_checksum = checksum(message_body);
let their_checksum = hex_to_int(their_checksum_str)?;
if our_checksum != their_checksum {
warn!(GDB, "incorrect checksum: our_checksum = {}, their_checksum = {}", our_checksum, their_checksum);
return self.nak();
}
// The input is syntactically well-formed, we'll ack it now, then we can respond with
// an empty response if we don't actually understand the command we received.
self.ack()?;
let message_type = message_body[0];
let message_body = &message_body[1..];
match message_type {
b'?' => {
// Let's say we halted due to SIGINT
self.send_fmt(format_args!("S{:02}", SIGINT))?;
}
b'c' => {
self.do_continue(message_body)?;
}
b'D' => {
self.process_detach_command()?;
}
b'g' => {
self.read_gprs()?;
}
b'G' => {
self.write_gprs(message_body)?;
}
b'H' => {
// Sets the thread to use for subsequent invocations of a particular command.
// We only have 1 thread, so acknowledge and do nothing.
self.send(b"OK")?;
}
b'm' => {
self.read_memory(message_body)?;
}
b'M' => {
self.write_memory(message_body)?;
}
b'p' => {
self.read_gpr(message_body)?;
}
b'P' => {
self.write_gpr(message_body)?;
}
b'q' => {
self.process_qread_command(message_body)?;
}
b'Q' => {
self.process_qwrite_command(message_body)?;
}
b's' => {
self.do_step(message_body)?;
}
b'z' => {
self.process_z_command(message_body, false)?;
}
b'Z' => {
self.process_z_command(message_body, true)?;
}
_ => {
self.unrecognised_command()?;
}
}
Ok(())
}
fn process_qread_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"fThreadInfo" => {
// First thread in list: thread ID 1
self.send(b"m1")
}
b"sThreadInfo" => {
// End of list, thread ID 1 is the only thread
self.send(b"l")
}
b"C" => {
// The current thread is thread 1, we only have 1 thread..
self.send(b"QC1")
}
b"Attached" => {
// We, the GDB server, are always already attached to a process
self.send(b"1")
}
b"HostInfo" => {
const MACH_O_ARM: u32 = 12;
const MACH_O_ARM_V4T: u32 = 5;
self.send_fmt(format_args!("cputype:{};cpusubtype:{};ostype:none;vendor:none;endian:little;ptrsize:4;", MACH_O_ARM, MACH_O_ARM_V4T))
}
_ => {
if let Some(tail) = strip_prefix(msg, b"Supported:") {
self.process_qsupported_command(tail)
} else {
self.unrecognised_command()
}
}
}
}
fn process_qsupported_command(&mut self, msg: &[u8]) -> GResult {
let mut have_capabilities = Vec::new();
for requested_capability in msg.split(|&b| b == b';' || b == b',') {
match requested_capability {
b"swbreak+" | b"hwbreak+" => {
have_capabilities.push(requested_capability);
}
b"arm" => {
have_capabilities.push(b"arm+");
}
// TODO: Support "vContSupported+"?
_ => {}
}
}
let capability_string = have_capabilities.join(&b';');
self.send(&capability_string)
}
fn process_qwrite_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"StartNoAckMode" => {
self.no_ack_mode = true;
self.send(b"OK")
}
_ => {
self.unrecognised_command()
}
}
}
fn read_gprs(&mut self) -> GResult {
let mut reg_string = Vec::with_capacity(16 * 8);
for reg in self.gba.arm.regs[..REG_PC].iter() {
reg_string.write(&int_to_hex_le(*reg))?;
}
reg_string.write(&int_to_hex_le(self.gba.arm.current_pc()))?;
self.send(&reg_string)
}
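// GDB wants register values as hex in target byte order, and the GBA's
// ARM7TDMI is little-endian, so e.g. a PC of 0x0800_0000 is sent as the eight
// ASCII chars "00000008", assuming `int_to_hex_le` (defined elsewhere) emits
// the least significant byte first.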
fn write_gprs(&mut self, msg: &[u8]) -> GResult {
for (i, value) in msg.chunks_exact(8).map(hex_to_int_le).enumerate() {
self.gba.arm.set_reg(i, value?);
}
self.send(b"OK")
}
fn read_gpr(&mut self, msg: &[u8]) -> GResult {
let reg_index: usize = hex_to_int(msg)?;
let reg = if reg_index == 25 {
self.gba.arm.cpsr.into()
} else if reg_index == REG_PC {
self.gba.arm.current_pc()
} else if reg_index < 16 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
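// e.g. the request `m8000000,4` asks for four bytes starting at 0x08000000
// and is answered with eight hex digits, one `{:02X}` pair per byte read.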
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind!= 2 && kind!= 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream, we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
}
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32),
WriteWatchpoint(u32),
AccessWatchpoint(u32),
Breakpoint(u32),
Step,
}
impl StopReason {
fn to_command(&self) -> Vec<u8> {
let mut result = Vec::new();
match self {
StopReason::ReadWatchpoint(addr) => write!(result, "T{:02}rwatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::WriteWatchpoint(addr) => write!(result, "T{:02}watch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::AccessWatchpoint(addr) => write!(result, "T{:02}awatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::Breakpoint(_) => write!(result, "T{:02}hwbreak:", SIGTRAP),
StopReason::Step => write!(result, "S{:02}", SIGTRAP),
}.unwrap();
result
}
}
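// e.g. hitting a breakpoint produces the stop reply "T05hwbreak:" and a
// completed single step produces "S05" (SIGTRAP being signal number 5).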
pub struct BusDebugSnooper {
delegate: BusPtr,
breakpoints: OrderedSet<u32>,
read_watchpoints: OrderedSet<u32>,
write_watchpoints: OrderedSet<u32>,
access_watchpoints: OrderedSet<u32>,
stop_reason: Option<StopReason>,
}
impl BusDebugSnooper {
pub fn wrap(delegate: BusPtr) -> Box<BusDebugSnooper> {
Box::new(BusDebugSnooper {
delegate,
breakpoints: OrderedSet::new(),
read_watchpoints: OrderedSet::new(),
write_watchpoints: OrderedSet::new(),
access_watchpoints: OrderedSet::new(),
stop_reason: None,
})
}
fn check_read(&mut self, addr: u32) {
if self.read_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::ReadWatchpoint(addr));
} else if self.access_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::AccessWatchpoint(addr));
}
}
fn check_write(&mut self, addr: u32) {
if self.write_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::WriteWatchpoint(addr));
}
}
}
impl Bus for BusDebugSnooper {
fn read8(&mut self, addr: u32) -> u8 {
self.check_read(addr);
self.delegate.read8(addr)
}
fn | read16 | identifier_name |
|
gdb_stub.rs | self.no_ack_mode = false;
self.stream = Some(stream);
}
// Unwrapping because we ensured it's Some above
let stream = self.stream.as_mut().unwrap();
let mut bytes = [0u8; 1200];
let mut msg: &[u8];
if let Some(amount) = transpose_would_block(stream.read(&mut bytes[..]))? {
if amount == 0 {
trace!(GDB, "Received 0 bytes, closing TcpStream..");
self.stream = None;
return Ok(());
} else {
let ascii_string = bytes_as_ascii(&bytes[..amount]);
trace!(GDB, "Received {} bytes: {:?}", amount, ascii_string);
msg = &bytes[..amount];
}
} else {
return Ok(());
}
while !msg.is_empty() {
let prev = msg;
self.parse_message(&mut msg)?;
// parse_message() must adjust `msg` to exclude the input it consumed.
assert_ne!(prev, msg);
}
Ok(())
}
fn parse_message(&mut self, msg: &mut &[u8]) -> GResult {
match (*msg)[0] {
b'+' => { // ack
*msg = &(*msg)[1..];
return Ok(());
}
b'-' => { // nak
*msg = &(*msg)[1..];
return Ok(());
}
b'$' => {
// Continue on to process this command
}
0x03 => { // Enter debugger
*msg = &(*msg)[1..];
self.run_state = RunState::Paused;
return self.send_fmt(format_args!("S{:02}", SIGINT));
}
first => {
// Skip this character, try parsing from the next character onwards
*msg = &(*msg)[1..];
warn!(GDB, "packet error; first byte = '{:02X}'", first);
return self.nak();
}
}
if !msg.contains(&b'#') {
trace!(GDB, "request was missing '#' character");
return self.nak();
}
let (message_body, mut their_checksum_str) = split_at(&msg[1..], b'#')?;
if their_checksum_str.len() < 2 {
trace!(GDB, "request had a checksum of less than 2 digits");
return self.nak();
}
// Cut the checksum off at 2 characters, any input left after that might be another request.
*msg = &their_checksum_str[2..];
their_checksum_str = &their_checksum_str[..2];
let our_checksum = checksum(message_body);
let their_checksum = hex_to_int(their_checksum_str)?;
if our_checksum != their_checksum {
warn!(GDB, "incorrect checksum: our_checksum = {}, their_checksum = {}", our_checksum, their_checksum);
return self.nak();
}
// The input is syntactically well-formed, we'll ack it now, then we can respond with
// an empty response if we don't actually understand the command we received.
self.ack()?;
let message_type = message_body[0];
let message_body = &message_body[1..];
match message_type {
b'?' => {
// Let's say we halted due to SIGINT
self.send_fmt(format_args!("S{:02}", SIGINT))?;
}
b'c' => {
self.do_continue(message_body)?;
}
b'D' => {
self.process_detach_command()?;
}
b'g' => {
self.read_gprs()?;
}
b'G' => {
self.write_gprs(message_body)?;
}
b'H' => {
// Sets the thread to use for subsequent invocations of a particular command.
// We only have 1 thread, so acknowledge and do nothing.
self.send(b"OK")?;
}
b'm' => {
self.read_memory(message_body)?;
}
b'M' => {
self.write_memory(message_body)?;
}
b'p' => {
self.read_gpr(message_body)?;
}
b'P' => {
self.write_gpr(message_body)?;
}
b'q' => {
self.process_qread_command(message_body)?;
}
b'Q' => {
self.process_qwrite_command(message_body)?;
}
b's' => {
self.do_step(message_body)?;
}
b'z' => {
self.process_z_command(message_body, false)?;
}
b'Z' => {
self.process_z_command(message_body, true)?;
}
_ => {
self.unrecognised_command()?;
}
}
Ok(())
}
fn process_qread_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"fThreadInfo" => {
// First thread in list: thread ID 1
self.send(b"m1")
}
b"sThreadInfo" => {
// End of list, thread ID 1 is the only thread
self.send(b"l")
}
b"C" => {
// The current thread is thread 1, we only have 1 thread..
self.send(b"QC1")
}
b"Attached" => {
// We, the GDB server, are always already attached to a process
self.send(b"1")
}
b"HostInfo" => {
const MACH_O_ARM: u32 = 12;
const MACH_O_ARM_V4T: u32 = 5;
self.send_fmt(format_args!("cputype:{};cpusubtype:{};ostype:none;vendor:none;endian:little;ptrsize:4;", MACH_O_ARM, MACH_O_ARM_V4T))
}
_ => {
if let Some(tail) = strip_prefix(msg, b"Supported:") {
self.process_qsupported_command(tail)
} else {
self.unrecognised_command()
}
}
}
}
fn process_qsupported_command(&mut self, msg: &[u8]) -> GResult {
let mut have_capabilities = Vec::new();
for requested_capability in msg.split(|&b| b == b';' || b == b',') {
match requested_capability {
b"swbreak+" | b"hwbreak+" => {
have_capabilities.push(requested_capability);
}
b"arm" => {
have_capabilities.push(b"arm+");
}
// TODO: Support "vContSupported+"?
_ => {}
}
}
let capability_string = have_capabilities.join(&b';');
self.send(&capability_string)
}
fn process_qwrite_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"StartNoAckMode" => {
self.no_ack_mode = true;
self.send(b"OK")
}
_ => {
self.unrecognised_command()
}
}
}
fn read_gprs(&mut self) -> GResult {
let mut reg_string = Vec::with_capacity(16 * 8);
for reg in self.gba.arm.regs[..REG_PC].iter() {
reg_string.write(&int_to_hex_le(*reg))?;
}
reg_string.write(&int_to_hex_le(self.gba.arm.current_pc()))?;
self.send(&reg_string)
}
fn write_gprs(&mut self, msg: &[u8]) -> GResult {
for (i, value) in msg.chunks_exact(8).map(hex_to_int_le).enumerate() {
self.gba.arm.set_reg(i, value?);
}
self.send(b"OK")
}
fn read_gpr(&mut self, msg: &[u8]) -> GResult {
let reg_index: usize = hex_to_int(msg)?;
let reg = if reg_index == 25 {
self.gba.arm.cpsr.into()
} else if reg_index == REG_PC {
self.gba.arm.current_pc()
} else if reg_index < 16 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind!= 2 && kind!= 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
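// For `z0`/`z1` breakpoint packets the `kind` field is the instruction width
// in bytes (2 for Thumb, 4 for ARM), which is why only those two values pass
// the guard above.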
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream, we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
}
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32),
WriteWatchpoint(u32),
AccessWatchpoint(u32),
Breakpoint(u32),
Step,
}
impl StopReason {
fn to_command(&self) -> Vec<u8> {
let mut result = Vec::new();
match self {
StopReason::ReadWatchpoint(addr) => write!(result, "T{:02}rwatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::WriteWatchpoint(addr) => write!(result, "T{:02}watch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::AccessWatchpoint(addr) => write!(result, "T{:02}awatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::Breakpoint(_) => write!(result, "T{:02}hwbreak:", SIGTRAP),
StopReason::Step => write!(result, "S{:02}", SIGTRAP),
}.unwrap();
result
}
}
pub struct BusDebugSnooper {
delegate: BusPtr,
breakpoints: OrderedSet<u32>,
read_watchpoints: OrderedSet<u32>,
write_watchpoints: OrderedSet<u32>,
access_watchpoints: OrderedSet<u32>,
stop_reason: Option<StopReason>,
}
impl BusDebugSnooper {
pub fn wrap(delegate: BusPtr) -> Box<BusDebugSnooper> {
Box::new(BusDebugSnooper {
delegate,
breakpoints: OrderedSet::new(),
read_watchpoints: OrderedSet::new(),
write_watchpoints: OrderedSet::new(),
access_watchpoints: OrderedSet::new(),
stop_reason: None,
})
}
fn check_read(&mut self, addr: u32) {
if self.read_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::ReadWatchpoint(addr));
} else if self.access_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::AccessWatchpoint(addr));
}
}
fn check_write(&mut self, addr: u32) {
if self.write_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::WriteWatchpoint(addr));
}
}
}
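// The snooper wraps the real bus: each access first checks the relevant
// watchpoint set, records a `StopReason` on a hit, and then forwards the
// access to `delegate` unchanged, as the `Bus` impl below shows.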
impl Bus for BusDebugSnooper {
fn read8(&mut self, addr: u32) -> u8 {
self.check_read(addr);
self.delegate.read8(addr)
}
fn read16(&mut self, addr: u32) -> u16 | {
self.check_read(addr);
self.delegate.read16(addr)
} | identifier_body |
|
declare.rs | /*!
Functionality for declaring Objective-C classes.
Classes can be declared using the `ClassDecl` struct. Instance variables and
methods can then be added before the class is ultimately registered.
# Example
The following example demonstrates declaring a class named `MyNumber` that has
one ivar, a `u32` named `_number` and a `number` method that returns it:
``` no_run
# #[macro_use] extern crate makepad_objc_sys;
# use makepad_objc_sys::declare::ClassDecl;
# use makepad_objc_sys::runtime::{Class, Object, Sel};
# fn main() {
let superclass = class!(NSObject);
let mut decl = ClassDecl::new("MyNumber", superclass).unwrap();
// Add an instance variable
decl.add_ivar::<u32>("_number");
// Add an ObjC method for getting the number
extern fn my_number_get(this: &Object, _cmd: Sel) -> u32 {
unsafe { *this.get_ivar("_number") }
}
unsafe {
decl.add_method(sel!(number),
my_number_get as extern fn(&Object, Sel) -> u32);
}
decl.register();
# }
```
*/
use std::ffi::CString;
use std::mem;
use std::ptr;
use runtime::{BOOL, Class, Imp, NO, Object, Protocol, Sel, self};
use {Encode, EncodeArguments, Encoding, Message};
/// Types that can be used as the implementation of an Objective-C method.
pub trait MethodImplementation {
/// The callee type of the method.
type Callee: Message;
/// The return type of the method.
type Ret: Encode;
/// The argument types of the method.
type Args: EncodeArguments;
/// Returns self as an `Imp` of a method.
fn imp(self) -> Imp;
}
macro_rules! method_decl_impl {
(-$s:ident, $r:ident, $f:ty, $($t:ident),*) => (
impl<$s, $r $(, $t)*> MethodImplementation for $f
where $s: Message, $r: Encode $(, $t: Encode)* {
type Callee = $s;
type Ret = $r;
type Args = ($($t,)*);
fn imp(self) -> Imp {
unsafe { mem::transmute(self) }
}
}
);
($($t:ident),*) => (
method_decl_impl!(-T, R, extern fn(&T, Sel $(, $t)*) -> R, $($t),*);
method_decl_impl!(-T, R, extern fn(&mut T, Sel $(, $t)*) -> R, $($t),*);
);
}
method_decl_impl!();
method_decl_impl!(A);
method_decl_impl!(A, B);
method_decl_impl!(A, B, C);
method_decl_impl!(A, B, C, D);
method_decl_impl!(A, B, C, D, E);
method_decl_impl!(A, B, C, D, E, F);
method_decl_impl!(A, B, C, D, E, F, G);
method_decl_impl!(A, B, C, D, E, F, G, H);
method_decl_impl!(A, B, C, D, E, F, G, H, I);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L);
fn count_args(sel: Sel) -> usize {
sel.name().chars().filter(|&c| c == ':').count()
}
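// e.g. `sel!(setObject:forKey:)` contains two ':' characters, so it takes two
// arguments; a selector like `description` contains none.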
fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString {
let mut types = ret.as_str().to_owned();
// First two arguments are always self and the selector
types.push_str(<*mut Object>::encode().as_str());
types.push_str(Sel::encode().as_str());
types.extend(args.iter().map(|e| e.as_str()));
CString::new(types).unwrap()
}
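// e.g. a method with no explicit arguments returning `u32` encodes as "I@:":
// 'I' for the unsigned int return, '@' for the implicit self object, and ':'
// for the selector argument.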
fn log2_align_of<T>() -> u8 {
let align = mem::align_of::<T>();
// Alignments are required to be powers of 2
debug_assert!(align.count_ones() == 1);
// log2 of a power of 2 is the number of trailing zeros
align.trailing_zeros() as u8
}
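// e.g. `log2_align_of::<u32>()` is 2, since `mem::align_of::<u32>()` is 4 and
// 4u32.trailing_zeros() == 2.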
/// A type for declaring a new class and adding new methods and ivars to it
/// before registering it.
pub struct ClassDecl {
cls: *mut Class,
}
impl ClassDecl {
fn with_superclass(name: &str, superclass: Option<&Class>)
-> Option<ClassDecl> {
let name = CString::new(name).unwrap();
let super_ptr = superclass.map_or(ptr::null(), |c| c);
let cls = unsafe {
runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0)
};
if cls.is_null() {
None
} else {
Some(ClassDecl { cls })
}
}
/// Constructs a `ClassDecl` with the given name and superclass.
/// Returns `None` if the class couldn't be allocated.
pub fn new(name: &str, superclass: &Class) -> Option<ClassDecl> {
ClassDecl::with_superclass(name, Some(superclass))
}
/**
Constructs a `ClassDecl` declaring a new root class with the given name.
Returns `None` if the class couldn't be allocated.
An implementation for `+initialize` must also be given; the runtime calls
this method for all classes, so it must be defined on root classes.
Note that implementing a root class is not a simple endeavor.
For example, your class probably cannot be passed to Cocoa code unless
the entire `NSObject` protocol is implemented.
Functionality it expects, like implementations of `-retain` and `-release`
used by ARC, will not be present otherwise.
*/
pub fn root(name: &str, initialize_fn: extern fn(&Class, Sel))
-> Option<ClassDecl> {
let mut decl = ClassDecl::with_superclass(name, None);
if let Some(ref mut decl) = decl {
unsafe {
decl.add_class_method(sel!(initialize), initialize_fn);
}
}
decl
}
/// Adds a method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Object> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let success = runtime::class_addMethod(self.cls, sel, func.imp(),
types.as_ptr());
assert!(success!= NO, "Failed to add method {:?}", sel);
}
/// Adds a class method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_class_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Class> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let metaclass = (*self.cls).metaclass() as *const _ as *mut _;
let success = runtime::class_addMethod(metaclass, sel, func.imp(),
types.as_ptr());
assert!(success!= NO, "Failed to add class method {:?}", sel);
}
/// Adds an ivar with type `T` and the provided name to self.
/// Panics if the ivar wasn't successfully added.
pub fn add_ivar<T>(&mut self, name: &str) where T: Encode {
let c_name = CString::new(name).unwrap();
let encoding = CString::new(T::encode().as_str()).unwrap();
let size = mem::size_of::<T>();
let align = log2_align_of::<T>();
let success = unsafe {
runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align,
encoding.as_ptr())
};
assert!(success!= NO, "Failed to add ivar {}", name);
}
/// Adds a protocol to self. Panics if the protocol wasn't successfully
/// added
pub fn add_protocol(&mut self, proto: &Protocol) {
let success = unsafe { runtime::class_addProtocol(self.cls, proto) };
assert!(success!= NO, "Failed to add protocol {:?}", proto);
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Class`.
pub fn register(self) -> &'static Class {
unsafe {
let cls = self.cls;
runtime::objc_registerClassPair(cls);
// Forget self otherwise the class will be disposed in drop
mem::forget(self);
&*cls
}
}
}
impl Drop for ClassDecl {
fn drop(&mut self) {
unsafe {
runtime::objc_disposeClassPair(self.cls);
}
}
}
/// A type for declaring a new protocol and adding new methods to it
/// before registering it.
pub struct ProtocolDecl {
proto: *mut Protocol
}
impl ProtocolDecl {
/// Constructs a `ProtocolDecl` with the given name. Returns `None` if the
/// protocol couldn't be allocated.
pub fn new(name: &str) -> Option<ProtocolDecl> {
let c_name = CString::new(name).unwrap();
let proto = unsafe {
runtime::objc_allocateProtocol(c_name.as_ptr())
};
if proto.is_null() {
None
} else {
Some(ProtocolDecl { proto })
}
}
fn add_method_description_common<Args, Ret>(&mut self, sel: Sel, is_required: bool,
is_instance_method: bool)
where Args: EncodeArguments,
Ret: Encode {
let encs = Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&Ret::encode(), encs);
unsafe {
runtime::protocol_addMethodDescription(
self.proto, sel, types.as_ptr(), is_required as BOOL, is_instance_method as BOOL);
}
}
/// Adds an instance method declaration with a given description to self.
pub fn add_method_description<Args, Ret>(&mut self, sel: Sel, is_required: bool)
where Args: EncodeArguments,
Ret: Encode {
self.add_method_description_common::<Args, Ret>(sel, is_required, true)
}
/// Adds a class method declaration with a given description to self.
pub fn add_class_method_description<Args, Ret>(&mut self, sel: Sel, is_required: bool)
where Args: EncodeArguments,
Ret: Encode |
/// Adds a requirement on another protocol.
pub fn add_protocol(&mut self, proto: &Protocol) {
unsafe {
runtime::protocol_addProtocol(self.proto, proto);
}
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Protocol`.
pub fn register(self) -> &'static Protocol {
unsafe {
runtime::objc_registerProtocol(self.proto);
&*self.proto
}
}
}
/*
#[cfg(test)]
mod tests {
use test_utils;
#[test]
fn test_custom_class() {
// Registering the custom class is in test_utils
let obj = test_utils::custom_object();
unsafe {
let _: () = msg_send![obj, setFoo:13u32];
let result: u32 = msg_send![obj, foo];
assert!(result == 13);
}
}
#[test]
fn test_class_method() {
let cls = test_utils::custom_class();
unsafe {
let result: u32 = msg_send![cls, classFoo];
assert!(result == 7);
}
}
}*/
| {
self.add_method_description_common::<Args, Ret>(sel, is_required, false)
} | identifier_body |