file_name | prefix | suffix | middle | fim_type
---|---|---|---|---
main.rs | num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
// note: s1 is moved on the next line and can no longer be used afterwards
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A shortcut for propagating errors: the `?` operator (sketched below)
}
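// A minimal sketch of that `?` shortcut (added for illustration; this helper
// is hypothetical and not part of the original file). Each `?` returns the
// error to the caller early instead of panicking:
fn read_username(path: &str) -> Result<String, std::io::Error> {
    use std::fs::File;
    use std::io::Read;
    let mut s = String::new();
    File::open(path)?.read_to_string(&mut s)?;
    Ok(s)
}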
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice (sketched below).
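// A sketch of that reference-returning variant (added for illustration, not
// part of the original file); returning `&T` drops the `Copy` bound entirely:
fn largest_ref<T: PartialOrd>(list: &[T]) -> &T {
    let mut largest = &list[0];
    for item in list {
        if item > largest {
            largest = item;
        }
    }
    largest
}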
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y','m', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1..=5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'..='j' => println!("early ASCII letter"),
'k'..='z' => println!("late ASCII letter"),
'A'..='Z' => println!("uppercase ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//Unsafe Rust doesn't enforce the usual memory-safety guarantees,
//in exchange for extra superpowers.
//You can take four actions in unsafe Rust:
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
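// A minimal sketch of the first superpower above — dereferencing raw
// pointers (function added for illustration, not in the original file):
fn do_unsafe_demo() {
    let mut num = 5;
    let r1 = &num as *const i32;
    let r2 = &mut num as *mut i32;
    // Creating raw pointers is safe; dereferencing them requires `unsafe`.
    unsafe {
        println!("r1 is: {}", *r1);
        *r2 = 10;
        println!("r2 is: {}", *r2);
    }
}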
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3..=7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10..=12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&dyn Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<dyn Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangl |
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Returning types with `impl Trait` (still static dispatch; a `dyn`-based
// dynamic-dispatch sketch follows below)
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
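// For contrast, a dynamic-dispatch sketch (added for illustration): the
// concrete type behind `&dyn fmt::Display` is resolved at runtime via a vtable.
fn show_items_dyn(items: &[&dyn fmt::Display]) {
    for item in items {
        println!("Item: {}", item);
    }
}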
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>;
// }
//// Item is the placeholder type.
///
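// A tiny sketch of an associated type in practice (type added for
// illustration): `Item` is chosen once in the `impl`, not by the caller.
struct Countdown(u32);
impl Iterator for Countdown {
    type Item = u32;
    fn next(&mut self) -> Option<Self::Item> {
        if self.0 == 0 {
            None
        } else {
            self.0 -= 1;
            Some(self.0)
        }
    }
}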
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_ | e {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
} | identifier_body |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) +... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = Vec::with_capacity(l as usize);
for i in 0..l {
let kk = 32 - (i + 1) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..l).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
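// A worked sketch of the constants above (parameter values assumed for
// illustration, not from the original file): with l = 2 and bg_bit = 8 we get
// Bg = 256, half_bg = 128, h = [1 << 24, 1 << 16] and
// offset = 128 * ((1 << 24) + (1 << 16)).
#[test]
fn tgsw_params_worked_example() {
    let params = TGswParams::new(2, 8, TLweParameters::new(512, 1, 0f64, 1f64));
    assert_eq!(params.bg, 256);
    assert_eq!(params.half_bg, 128);
    assert_eq!(params.h, vec![1 << 24, 1 << 16]);
    assert_eq!(params.offset, 128 * ((1 << 24) + (1 << 16)));
}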
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correct
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm).
/// C prototype: void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample, const TGswParams* params);
/// Computes accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
// TODO: the C version indexed `result + (i * l)`; plain `i` seems correct here
&mut result[i as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) | for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs; | identifier_body |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) +... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = Vec::with_capacity(l as usize);
for i in 0..l {
let kk = 32 - (i + 1) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..l).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correct
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm).
/// C prototype: void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample, const TGswParams* params);
/// Computes accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
// TODO: the C version indexed `result + (i * l)`; plain `i` seems correct here
&mut result[i as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn | () {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| test_add_h | identifier_name |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) +... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = Vec::with_capacity(l as usize);
for i in 0..l {
let kk = 32 - (i + 1) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..l).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correct
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm).
/// C prototype: void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample, const TGswParams* params);
/// Computes accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
// TODO: the C version indexed `result + (i * l)`; plain `i` seems correct here
&mut result[i as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else |
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
} | conditional_block |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) +... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = Vec::with_capacity(l as usize);
for i in 0..l {
let kk = 32 - (i + 1) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..l).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correct
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm).
/// C prototype: void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample, const TGswParams* params);
/// Computes accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
// TODO: the C version indexed `result + (i * l)`; plain `i` seems correct here
&mut result[i as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
        // rem_euclid keeps the coefficients in [-5, 4] even for negative samples.
        let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng).rem_euclid(10) - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
            let mut sample = TGswSample::new(&params);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
            let h = &params.h;
            let alpha = 4.2;
            let message = random_int_polynomial(n);
            fully_random_tgsw(&mut sample, alpha, &params);
            let sample_copy = sample.clone();
            sample.add_mu_h(&message, &params);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
                            .for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * *m));
                        // for jj in 0..n as usize {
                        //     assert_eq!(
                        //         new_polynomial.coefs[jj],
                        //         old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
                        //     );
                        // }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
                        old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
// trie.rs
use std::fmt::Debug;
use super::bit_cache::BitCache;
use crate::double_array::DoubleArray;
use bincode;
use serde::Serialize;
use serde::de::DeserializeOwned;
struct Node<T> {
key : u8,
values: Vec<T>,
nexts : Vec<Node<T>>,
}
/// トライ木の実装。
/// ダブル配列は直接構築することはできないので、トライ木を構築してから変換することで構築する。
///
/// # Examples
///
/// ```
/// use std::fmt::Debug;
/// use dary::DoubleArray;
/// use dary::Trie;
/// use serde_derive::{Serialize, Deserialize};
///
/// fn main() {
/// let key1 = String::from("foo");
/// let key2 = String::from("bar");
/// let key3 = String::from("baz");
///
/// let sample1 = Sample { surface: key1.clone(), cost: 1 };
/// let sample2 = Sample { surface: key1.clone(), cost: 2 };
/// let sample3 = Sample { surface: key2.clone(), cost: 1 };
/// let sample4 = Sample { surface: key3.clone(), cost: 1 };
///
/// let mut trie: Trie<Sample> = Trie::new();
/// trie.set(&key1, sample1.clone());
/// trie.set(&key1, sample2.clone());
/// trie.set(&key2, sample3.clone());
/// trie.set(&key3, sample4.clone());
///
/// let double_array = trie.to_double_array().ok().unwrap();
/// assert_eq!(vec![sample1, sample2], double_array.get(&key1).unwrap());
/// assert_eq!(vec![sample3] , double_array.get(&key2).unwrap());
/// assert_eq!(vec![sample4] , double_array.get(&key3).unwrap());
/// }
///
/// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// struct Sample {
/// surface: String,
/// cost: usize,
/// }
/// ```
pub struct Trie<T: Serialize + DeserializeOwned + Debug> {
root: Node<T>,
len: usize,
}
impl<T: Serialize + DeserializeOwned + Debug> Trie<T> {
pub fn new() -> Trie<T> {
Trie {
root: Node { key: 0, values: Vec::new(), nexts: Vec::new() },
len: 0,
}
}
/// trieにノードを追加する
/// 一つのkeyにつき256個までの値を登録できる
/// 超えた場合はpanic
///
/// # Arguments
///
/// * `key` - 追加するキー
/// * `value` - キーに対応する値
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// trieを探索する
/// keyに対応する値が見つかったら値のスライスを返す
///
/// # Arguments
///
/// * `key` - 探索するkey
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// トライ木をダブル配列に変換する
///
/// # Panics
/// dataをバイト列に変換できなかった場合にpanicする。
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - ダブル配列の初期サイズ
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Error> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
        bit_cache.set(1);
        let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
        if !self.root.nexts.is_empty() {
            stack.push((1, self.root));
        }
        while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
            if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
            while base + max_key >= len {
                len *= 2;
                base_arr.resize(len, 0);
                check_arr.resize(len, 0);
            }
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
        // 配列のリサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
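    // Sketch of the traversal contract the arrays above are built to satisfy.
    // This helper is illustrative only (the real lookup lives in
    // DoubleArray::get): from state s, consuming byte c moves to
    // t = base[s] + c, and the move is valid only when check[t] == s.
    #[allow(dead_code)]
    fn walk_sketch(base: &[u32], check: &[u32], key: &[u8]) -> Option<usize> {
        let mut s = 1usize; // index 1 is reserved for the root
        for &c in key {
            let t = base[s] as usize + c as usize;
            if check.get(t).copied() != Some(s as u32) {
                return None;
            }
            s = t;
        }
        // The values of a key, if any, sit behind the extra c = 255 transition.
        Some(s)
    }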
/// 新しいbase値を探索するメソッド
///
/// # Arguments
///
/// * `nodes` - 追加対象のノード
/// * `bit_cache` - BitCacheのインスタンス
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
            let empty_idx = bit_cache.find_empty_idx(offset);
            if empty_idx < 256 {
                panic!("empty_idx={}, first_key={}", empty_idx, first_key);
            }
            let new_base = empty_idx - first_key;
// すべてのノードが重複せずに配置できるかをチェック
            for next in nodes {
                if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
        // nodesが空の場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
        // 遷移できないkeyはNoneを返す
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
}
// main.rs
extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
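// These limits follow the OSM PBF format: a BlobHeader may not exceed 64 KiB
// and an uncompressed Blob may not exceed 32 MiB; anything larger is rejected
// as corrupt input in do_processing below.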
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
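    // Chain the default hook, then exit: a panic on any worker thread should
    // take the whole process down instead of leaving the channels blocked.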
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
        Err(err) => eprintln!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
    let block = tried_block.expect("blob has neither raw nor zlib_data");
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
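    // DenseNodes store ids, timestamps and coordinates delta-encoded against
    // the previous entry; the loops below keep running sums to recover the
    // absolute values for the fields this program uses.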
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
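// Worked example of the fixed-point decoding above, with assumed values (not
// from a real extract): granularity = 100, lon_offset = 0 and a delta-decoded
// raw value of 135_000_000 give
//   0.000000001 * (0 + 100 * 135_000_000) = 13.5 degrees.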
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min |
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
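// For reference, a sketch of the framing the read loop above consumes; this
// follows the OSM PBF container format and repeats until end of file:
//
//   [4-byte big-endian length of the BlobHeader]
//   [BlobHeader message: type_pb ("OSMHeader"/"OSMData"), datasize]
//   [Blob message of `datasize` bytes: raw or zlib-compressed PrimitiveBlock]
//
// `read_sfixed32` reads little-endian, hence the `swap_bytes()` above to
// recover the big-endian length prefix.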
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
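// Note on the branches above: the PBF format also allows other Blob encodings
// (e.g. lzma). Those fall through to `None`, so the `unwrap` turns an
// unsupported encoding into a panic rather than a silent skip.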
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
| {
osm_stats.lon_min = worker_stats.lon_min
} | conditional_block |
main.rs | extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
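// A note on the structure of the function above: it is a fan-out/fan-in
// pipeline. The main thread parses the blob framing and pushes OSMData blobs
// into the bounded `sender` channel (capping memory at WORK_BOUND queued
// blobs), each worker accumulates a private OsmStats, and once `sender` is
// dropped the workers drain, send their partial stats back over the unbounded
// channel, and the results are merged field by field.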
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) |
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
| {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
} | identifier_body |
lptim.rs |
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter modes.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum | {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
lptim: LPTIM,
input_freq: Hertz,
_mode: PhantomData<M>,
}
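// Arithmetic behind the "256 seconds" figure in the doc comment above: with
// the maximum prescaler of 128 and a full 16-bit counter, one overflow takes
// 128 * 65536 / 32768 Hz = 256 s (4 min 16 s); with ARR capped at 65535 the
// achievable maximum is marginally below that.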
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(period)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `enable` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags.
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit());
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
impl hal::timer::Periodic for LpTimer<Periodic> {}
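// Minimal usage sketch for periodic mode (illustrative only: `dp`, `pwr` and
// `rcc` are assumed to come from the usual device takeover and RCC setup, and
// the `CountDown` trait must be in scope):
//
//   let mut timer = LpTimer::init_periodic(dp.LPTIM, &mut pwr, &mut rcc, ClockSrc::Lse);
//   timer.start(2.hz());
//   nb::block!(timer.wait()).unwrap(); // blocks for 500 ms per period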
impl hal::timer::CountDown for LpTimer<OneShot> {
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
#[derive(Copy, Clone)]
struct TimeConf {
psc_encoded: u8,
arr: u16,
}
impl TimeConf {
const ARR_MAX: u16 = u16::max_value();
/// Calculates prescaler and autoreload value for producing overflows at a rate of
/// `output_freq`.
fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = (Fi / psc) / ARR
//
// Therefore:
// Fo * ARR = Fi / psc
// Fo * ARR * psc = Fi
// ARR = (Fi / Fo) / psc
// psc = (Fi / Fo) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen frequency is too slow for the timer and
// we panic. Otherwise we use that `psc` to calculate the real `ARR`.
// Add `ARR_MAX - 1` to round the result upwards
let psc = ((input_freq.0 / output_freq.0) + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128);
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = u16::try_from((input_freq.0 / output_freq.0) / psc).unwrap();
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
/// Calculates prescaler and autoreload value for producing overflows after every
/// `output_period`.
fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
// Here, the `output_period` can be very long, resulting in an output frequency of < 1 Hz.
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// Po = 1 / Fo = Output Period
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = 1 / Po = (Fi / psc) / ARR
//
// Therefore:
// ARR / Po = Fi / psc
// (ARR * psc) / Po = Fi
// ARR * psc = Fi * Po
// ARR = (Fi * Po) / psc
// psc = (Fi * Po) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen period is too long for the timer and we
// panic. Otherwise we use that `psc` to calculate the real `ARR`.
// First, calculate the product `Fi * Po`. Since `output_period` is in µs, we have to divide
// it by 1_000_000 to get seconds, without losing much precision. We can divide either of
// the multiplicands, or the resulting product. Dividing the resulting product results in
// the least amount of rounding error, but might require 64-bit multiplication and division,
// which is very expensive. Dividing either of the multiplicands by 1_000_000 can easily
// result in significant rounding error that makes this API useless.
let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();
// Add `ARR_MAX - 1` to round the result upwards
let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
assert!(psc > 0); // if 0, the output period is too short to be produced from input_freq
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128); // if > 128, the output period is too long to be produced from input_freq
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = (fi_po / psc) as u16;
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
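// Worked example matching the LSE case: input_freq = 32_768 Hz and
// output_period = 10 s = 10_000_000 µs give fi_po = 327_680. Then
// psc = ceil(327_680 / 65_535) = 6, rounded up to the power of two 8, and
// arr = 327_680 / 8 = 40_960. Check: 8 * 40_960 ticks / 32_768 Hz = 10 s.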
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10 | ClockSrc | identifier_name |
lptim.rs |
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter modes.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ClockSrc {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
lptim: LPTIM,
input_freq: Hertz,
_mode: PhantomData<M>,
}
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(period)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self |
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `enable` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags.
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit());
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
impl hal::timer::Periodic for LpTimer<Periodic> {}
impl hal::timer::CountDown for LpTimer<OneShot> {
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
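// Minimal usage sketch for one-shot mode (illustrative setup values; the
// `CountDown` trait must be in scope). The `Microseconds` time unit is what
// allows multi-second timeouts here:
//
//   let mut timer = LpTimer::init_oneshot(dp.LPTIM, &mut pwr, &mut rcc, ClockSrc::Lsi);
//   timer.start(2_500_000.us()); // 2.5 s timeout
//   nb::block!(timer.wait()).unwrap();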
#[derive(Copy, Clone)]
struct TimeConf {
psc_encoded: u8,
arr: u16,
}
impl TimeConf {
const ARR_MAX: u16 = u16::max_value();
/// Calculates prescaler and autoreload value for producing overflows at a rate of
/// `output_freq`.
fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = (Fi / psc) / ARR
//
// Therefore:
// Fo * ARR = Fi / psc
// Fo * ARR * psc = Fi
// ARR = (Fi / Fo) / psc
// psc = (Fi / Fo) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen frequency is too slow for the timer and
// we panic. Otherwise we use that `psc` to calculate the real `ARR`.
// Add `ARR_MAX - 1` to round the result upwards
let psc = ((input_freq.0 / output_freq.0) + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128);
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = u16::try_from((input_freq.0 / output_freq.0) / psc).unwrap();
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
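// Worked example with illustrative values: input_freq = 37_000 Hz (LSI) and
// output_freq = 100 Hz give Fi / Fo = 370, so psc = 1 (no prescaling needed)
// and arr = 370. Check: 37_000 / 1 / 370 = 100 Hz.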
/// Calculates prescaler and autoreload value for producing overflows after every
/// `output_period`.
fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
// Here, the `output_period` can be very long, resulting in an output frequency of < 1 Hz.
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// Po = 1 / Fo = Output Period
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = 1 / Po = (Fi / psc) / ARR
//
// Therefore:
// ARR / Po = Fi / psc
// (ARR * psc) / Po = Fi
// ARR * psc = Fi * Po
// ARR = (Fi * Po) / psc
// psc = (Fi * Po) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen period is too long for the timer and we
// panic. Otherwise we use that `psc` to calculate the real `ARR`.
// First, calculate the product `Fi * Po`. Since `output_period` is in µs, we have to divide
// it by 1_000_000 to get seconds, without losing much precision. We can divide either of
// the multiplicands, or the resulting product. Dividing the resulting product results in
// the least amount of rounding error, but might require 64-bit multiplication and division,
// which is very expensive. Dividing either of the multiplicands by 1_000_000 can easily
// result in significant rounding error that makes this API useless.
let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();
// Add `ARR_MAX - 1` to round the result upwards
let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
assert!(psc > 0); // if 0, the output period is too short to be produced from input_freq
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128); // if > 128, the output period is too long to be produced from input_freq
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = (fi_po / psc) as u16;
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10 | {
Self::init(lptim, pwr, rcc, clk)
} | identifier_body |
lptim.rs | ;
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter modes.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ClockSrc {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
lptim: LPTIM,
input_freq: Hertz,
_mode: PhantomData<M>,
}
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(period)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `enable` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags.
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
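// Minimal usage sketch for encoder mode, assuming PB5/PB7 are taken from the
// GPIOB split while still in their analog reset state (illustrative only):
//
//   let mut enc = LpTimer::init_encoder(dp.LPTIM, &mut pwr, &mut rcc,
//       ClockSrc::Apb1, (gpiob.pb5, gpiob.pb7));
//   enc.enable(u16::max_value()); // counter wraps at the full 16-bit range
//   let position = enc.count();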
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit()); | // The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
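// Sketch of enabling a single interrupt source. `Interrupts` derives
// `Default`, so struct-update syntax leaves every other source untouched;
// note that this call also stops the timer, so do it before `start`:
//
//   timer.enable_interrupts(Interrupts {
//       autoreload_match: true,
//       ..Interrupts::default()
//   });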
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
impl hal::timer::Periodic for LpTimer<Periodic> {}
impl hal::timer::CountDown for LpTimer<OneShot> {
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
#[derive(Copy, Clone)]
struct TimeConf {
psc_encoded: u8,
arr: u16,
}
impl TimeConf {
const ARR_MAX: u16 = u16::max_value();
/// Calculates prescaler and autoreload value for producing overflows at a rate of
/// `output_freq`.
fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = (Fi / psc) / ARR
//
// Therefore:
// Fo * ARR = Fi / psc
// Fo * ARR * psc = Fi
// ARR = (Fi / Fo) / psc
// psc = (Fi / Fo) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen frequency is too slow for the timer and
// we panic. Otherwise we use that `psc` to calculate the real `ARR`.
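// Worked example (illustrative, matching the `calc_from_freq` test below):
// with Fi = 32_768 Hz (LSE) and Fo = 1 Hz, psc = ceil(32_768 / 65_535) = 1,
// which is already a power of two, and ARR = 32_768 / 1 = 32_768.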
// Add `ARR_MAX - 1` to round the result upwards
let psc = ((input_freq.0 / output_freq.0) + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128);
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = u16::try_from((input_freq.0 / output_freq.0) / psc).unwrap();
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
/// Calculates prescaler and autoreload value for producing overflows after every
/// `output_period`.
fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
// Here, the `output_period` can be very long, resulting in an output frequency of < 1 Hz.
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// Po = 1 / Fo = Output Period
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = 1 / Po = (Fi / psc) / ARR
//
// Therefore:
// ARR / Po = Fi / psc
// (ARR * psc) / Po = Fi
// ARR * psc = Fi * Po
// ARR = (Fi * Po) / psc
// psc = (Fi * Po) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen period is too long for the timer and we
// panic. Otherwise we use that `psc` to calculate the real `ARR`.
// First, calculate the product `Fi * Po`. Since `output_period` is in µs, we have to divide
// it by 1_000_000 to get seconds, without losing much precision. We can divide either of
// the multiplicands, or the resulting product. Dividing the resulting product results in
// the least amount of rounding error, but might require 64-bit multiplication and division,
// which is very expensive. Dividing either of the multiplicands by 1_000_000 can easily
// result in significant rounding error that makes this API useless.
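// Worked example (illustrative, matching the `calc_from_period` test below):
// with Fi = 32_768 Hz and Po = 10 s (10_000_000 µs), Fi * Po = 327_680;
// psc = ceil(327_680 / 65_535) = 6, rounded up to the power of two 8, and
// ARR = 327_680 / 8 = 40_960, giving exactly 8 * 40_960 / 32_768 Hz = 10 s.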
let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();
// Add `ARR_MAX - 1` to round the result upwards
let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
assert!(psc > 0); // if 0, the output period is too short to be produced from input_freq
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128); // if > 128, the output period is too long to be produced from input_freq
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = (fi_po / psc) as u16;
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10_000 |
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled." | random_line_split |
campaign_loader.rs | use std::path::{Path, PathBuf};
use std::ffi::OsStr;
//For image loading and hooking into the Task system
use coffee::{
load::Task,
graphics::Image,
};
use coffee::graphics::Gpu;
//For config file loading and parsing
use config::*;
//To locate all config files
extern crate walkdir;
use walkdir::WalkDir;
use super::{AssetDatabase, AssetContainer, SpriteSheet};
use super::audio::{ClipCategory, AudioClip};
//loads the metadata for each campaign so we can display the options
//pub fn load_all_campaign_metadata(asset_db: &mut AssetDatabase) {//-> Config {
//}
//loads all data for a given campaign
pub fn load_campaign_data(path: &str, gpu: &mut Gpu, asset_db: &mut AssetDatabase) {
//load all config files under the campaign folder
let mut campaign_config_paths = find_config_files(path);
//for each config file, load it then load the associated asset
while let Some(config_path) = campaign_config_paths.pop() {
let config =
match load_config_task(&config_path).run(gpu) {
Ok(config) => config,
Err(e) => {
warn!("[Asset Loading] Could not load config file. Following error returned: {}", e);
continue //skip this config file. TODO never gets to the error message at end of loop
},
};
//TODO make type case insensitive
let asset_was_loaded = match config.get_str("type").unwrap_or("".to_string()).as_str() {
"sprite sheet" => load_sprite_sheet(&config, &config_path, gpu, asset_db),
"audio clip" => load_audio_clip(&config, &config_path, asset_db),
_ => {
warn!("[Asset Loading] 'Type' key does not exist or value is not supported. Config File Path: {}",
config_path.to_str().unwrap());
false
},
};
//do some extra logging to help bring errors to people's attention.
if asset_was_loaded | else {
error!("[Asset Loading] Failed to load asset relating to config file {}. {}",
config_path.to_str().unwrap(),
"Please review previous warnings."
);
}
}
}
//make sure file is one of the right formats for configuration files
fn is_config_ext(file_path: &Path) -> bool {
match file_path.extension().and_then(OsStr::to_str) {
Some("yml") => true,
_ => false
}
}
//locates all config files under a given path recursively
fn find_config_files(path: &str) -> Vec<PathBuf> {
let mut config_file_paths = vec![];
for entry in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
if is_config_ext( entry.path() )
&& entry.path().file_stem().unwrap_or(OsStr::new("")) != "campaign" {
config_file_paths.push(entry.into_path());
}
}
config_file_paths
}
//utility function to create a coffee error since it's a bit of a pain.
fn make_coffee_err_from_str(msg: &str) -> coffee::Error {
coffee::Error::IO(
std::io::Error::new( std::io::ErrorKind::Other, msg )
)
}
//creates a task for loading a config file and its resources
fn load_config_task(file_path: &PathBuf) -> Task<Config> {
//needed so closure below can capture
let path = file_path.clone();
Task::new(move || {
//coerce into string value or return error
let str_path = match path.to_str() {
Some(string) => string,
//Will be logged in the function that runs the task.
None => return Err(
make_coffee_err_from_str("Config path cannot be converted to string.")
),
};
//create the config struct and load in the given file, either returning a populated
// config file or a relevant error
let mut config_data = Config::default();
match config_data.merge(File::with_name(&str_path)) {
Ok(_) => Ok(config_data),
//Coerce err to an error type we can return.
//Will be logged in the function that runs the task.
Err(err) => Err( make_coffee_err_from_str( err.to_string().as_str() ) ),
}
})
}
//load sprite sheets
//TODO, maybe should make this return a task also?
fn load_sprite_sheet(config: &Config, config_path: &PathBuf,
gpu: &mut Gpu, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let rows = config.get_int("rows");
let columns = config.get_int("columns");
let animations = config.get_table("animations");
if file.is_err() || rows.is_err() || columns.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for sprite sheet type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = rows { warn!("{} {}", err_msg_head, err); }
if let Err(err) = columns { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//process the file path and asset name to the right types
// assume image path is given as relative to config path hence taking the parent as a starting point.
let image_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match image_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false //name is not UTF-8 compatible so abort
}
};
//try to load image
let image = match Image::load( image_path.clone() ).run(gpu) {
Ok(image) => image,
Err(err) => {
warn!("[Asset Loading] Could not load Image at {} related to config file {}. Following error returned: {}",
image_path.clone().to_str().unwrap_or("<error could not convert image path to str>"),
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
return false //load image failed.
}
};
//create sprite sheet, add animations, then add the new asset to the database
let mut spritesheet = SpriteSheet::new(
image,
rows.ok().expect("row convert error") as u16,
columns.ok().expect("column convert error") as u16,
);
if animations.is_ok() {
for (animation_name, tuple_list) in animations.ok().unwrap().iter() {
match tuple_list.clone().try_into::< Vec<(u16,u16)> >() {
Ok(sprite_pos_array) =>
//TODO might want to do additional checking of data.
// No error is thrown for having an extra value, regardless of whether it is an int or not.
// Error branch will happen if a string is in 1st or 2nd location or if a tuple is
// replaced by something else.
spritesheet.add_animation(animation_name.clone(), sprite_pos_array),
Err(err) => {
warn!("[Asset Loading] Animation {} does not follow form {} in config file {}. Following error returned: {}",
animation_name,
"[ [row_1, col_1],..., [row_n, col_n] ]",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
continue;
}
}
}
}
asset_db.add_asset(asset_name, AssetContainer::Spritesheet(spritesheet));
return true;
}
//load sound clips
//TODO, maybe should make this return a task also?
fn load_audio_clip(config: &Config, config_path: &PathBuf, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let category = config.get_str("category");
if file.is_err() || category.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for audio clip type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = category { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//TODO make case insensitive
let clip_category = match category.unwrap().as_str() {
"voice" => ClipCategory::Voice,
"music" => ClipCategory::Music,
"effects" => ClipCategory::Effects,
failed_category => {
warn!("[Asset Loading] Provided audio category '{}' is not a valid option. Related to config file {}.",
failed_category,
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
}
};
// assume audio path is given as relative to config path hence taking the parent as a starting point.
let audio_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match audio_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false; //name is not UTF-8 compatible so abort
}
};
let audio_clip = AudioClip::new(audio_path, clip_category);
asset_db.add_asset(asset_name, AssetContainer::AudioClip(audio_clip));
return true;
}
| {
info!("[Asset Loading] Loaded asset relating to config file {}",
config_path.to_str().unwrap());
} | conditional_block |
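// Hedged usage sketch (illustrative; `AssetDatabase::new()` and the campaign
// path are assumptions, not from the source): given a coffee `Gpu` handle,
// the loader above would be driven like:
//
//     let mut asset_db = AssetDatabase::new();
//     load_campaign_data("campaigns/demo", gpu, &mut asset_db);
//     // asset_db now maps asset file paths to spritesheets and audio clips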
campaign_loader.rs | use std::path::{Path, PathBuf};
use std::ffi::OsStr;
//For image loading and hooking into the Task system
use coffee::{
load::Task,
graphics::Image,
};
use coffee::graphics::Gpu;
//For config file loading and parsing
use config::*;
//To locate all config files
extern crate walkdir;
use walkdir::WalkDir;
use super::{AssetDatabase, AssetContainer, SpriteSheet};
use super::audio::{ClipCategory, AudioClip};
//loads the metadata for each campaign so we can display the options
//pub fn load_all_campaign_metadata(asset_db: &mut AssetDatabase) {//-> Config {
//}
//loads all data for a given campaign
pub fn load_campaign_data(path: &str, gpu: &mut Gpu, asset_db: &mut AssetDatabase) {
//load all config files under the campain folder
let mut campaign_config_paths = find_config_files(path);
//for each config file, load it then load the associated asset
while let Some(config_path) = campaign_config_paths.pop() {
let config =
match load_config_task(&config_path).run(gpu) {
Ok(config) => config,
Err(e) => {
warn!("[Asset Loading] Could not load config file. Following error returned: {}", e);
continue //skip this config file. TODO never gets to the error message at end of loop
},
};
//TODO make type case insensitive
let asset_was_loaded = match config.get_str("type").unwrap_or("".to_string()).as_str() {
"sprite sheet" => load_sprite_sheet(&config, &config_path, gpu, asset_db),
"audio clip" => load_audio_clip(&config, &config_path, asset_db),
_ => {
warn!("[Asset Loading] 'Type' key does not exist or value is not supported. Config File Path: {}",
config_path.to_str().unwrap());
false
},
};
//do some extra logging to help bring errors to people's attention.
if asset_was_loaded {
info!("[Asset Loading] Loaded asset relating to config file {}",
config_path.to_str().unwrap());
} else {
error!("[Asset Loading] Failed to load asset relating to config file {}. {}",
config_path.to_str().unwrap(),
"Please review previous warnings."
);
}
}
}
//make sure file is one of the right formats for configuration files
fn is_config_ext(file_path: &Path) -> bool {
match file_path.extension().and_then(OsStr::to_str) {
Some("yml") => true,
_ => false
}
}
//locates all config files under a given path recursively
fn find_config_files(path: &str) -> Vec<PathBuf> {
let mut config_file_paths = vec![];
for entry in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
if is_config_ext( entry.path() )
&& entry.path().file_stem().unwrap_or(OsStr::new("")) != "campaign" {
config_file_paths.push(entry.into_path());
}
}
config_file_paths
}
//utility function to create a coffee error since it's a bit of a pain.
fn make_coffee_err_from_str(msg: &str) -> coffee::Error {
coffee::Error::IO(
std::io::Error::new( std::io::ErrorKind::Other, msg )
)
}
//creates a task for loading a config file and its resources
fn load_config_task(file_path: &PathBuf) -> Task<Config> {
//needed so closure below can capture
let path = file_path.clone();
Task::new(move || {
//coerce into string value or return error
let str_path = match path.to_str() {
Some(string) => string,
//Will be logged in the function that runs the task.
None => return Err(
make_coffee_err_from_str("Config path cannot be converted to string.")
),
};
//create the config struct and load in the given file, either returning a populated
// config file or a relevant error
let mut config_data = Config::default();
match config_data.merge(File::with_name(&str_path)) {
Ok(_) => Ok(config_data),
//Coerce err to an error type we can return.
//Will be logged in the function that runs the task.
Err(err) => Err( make_coffee_err_from_str( err.to_string().as_str() ) ),
}
})
}
//load sprite sheets
//TODO, maybe should make this return a task also?
fn load_sprite_sheet(config: &Config, config_path: &PathBuf,
gpu: &mut Gpu, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let rows = config.get_int("rows");
let columns = config.get_int("columns");
let animations = config.get_table("animations");
if file.is_err() || rows.is_err() || columns.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for sprite sheet type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = rows { warn!("{} {}", err_msg_head, err); }
if let Err(err) = columns { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//process the file path and asset name to the right types
// assume image path is given as relative to config path hence taking the parent as a starting point.
let image_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match image_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false //name is not UTF-8 compatible so abort
}
};
//try to load image
let image = match Image::load( image_path.clone() ).run(gpu) {
Ok(image) => image,
Err(err) => {
warn!("[Asset Loading] Could not load Image at {} related to config file {}. Following error returned: {}",
image_path.clone().to_str().unwrap_or("<error could not convert image path to str>"),
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
return false //load image failed.
}
};
//create sprite sheet, add animations, then add the new asset to the database
let mut spritesheet = SpriteSheet::new(
image,
rows.ok().expect("row convert error") as u16,
columns.ok().expect("column convert error") as u16,
);
if animations.is_ok() {
for (animation_name, tuple_list) in animations.ok().unwrap().iter() {
match tuple_list.clone().try_into::< Vec<(u16,u16)> >() {
Ok(sprite_pos_array) =>
//TODO might want to do additional checking of data.
// No error is thrown for having an extra value, regardless of whether it is an int or not.
// Error branch will happen if a string is in 1st or 2nd location or if a tuple is
// replaced by something else.
spritesheet.add_animation(animation_name.clone(), sprite_pos_array),
Err(err) => {
warn!("[Asset Loading] Animation {} does not follow form {} in config file {}. Following error returned: {}",
animation_name,
"[ [row_1, col_1],..., [row_n, col_n] ]",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
continue;
}
}
}
}
asset_db.add_asset(asset_name, AssetContainer::Spritesheet(spritesheet));
return true;
}
//load sound clips
//TODO, maybe should make this return a task also?
fn load_audio_clip(config: &Config, config_path: &PathBuf, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let category = config.get_str("category");
if file.is_err() || category.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for audio clip type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = category { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//TODO make case insensitive
let clip_category = match category.unwrap().as_str() {
"voice" => ClipCategory::Voice,
"music" => ClipCategory::Music,
"effects" => ClipCategory::Effects,
failed_category => {
warn!("[Asset Loading] Provided audio category '{}' is not a valid option. Related to config file {}.",
failed_category,
config_path.to_str().unwrap_or("<error could not convert config path to str>"), | // assume audio path is given as relative to config path hence taking the parent as a starting point.
let audio_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match audio_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false; //name is not UTF-8 compatible so abort
}
};
let audio_clip = AudioClip::new(audio_path, clip_category);
asset_db.add_asset(asset_name, AssetContainer::AudioClip(audio_clip));
return true;
} | );
return false;
}
};
| random_line_split |
campaign_loader.rs | use std::path::{Path, PathBuf};
use std::ffi::OsStr;
//For image loading and hooking into the Task system
use coffee::{
load::Task,
graphics::Image,
};
use coffee::graphics::Gpu;
//For config file loading and parsing
use config::*;
//To locate all config files
extern crate walkdir;
use walkdir::WalkDir;
use super::{AssetDatabase, AssetContainer, SpriteSheet};
use super::audio::{ClipCategory, AudioClip};
//loads the metadata for each campaign so we can display the options
//pub fn load_all_campaign_metadata(asset_db: &mut AssetDatabase) {//-> Config {
//}
//loads all data for a given campaign
pub fn load_campaign_data(path: &str, gpu: &mut Gpu, asset_db: &mut AssetDatabase) {
//load all config files under the campain folder
let mut campaign_config_paths = find_config_files(path);
//for each config file, load it then load the associated asset
while let Some(config_path) = campaign_config_paths.pop() {
let config =
match load_config_task(&config_path).run(gpu) {
Ok(config) => config,
Err(e) => {
warn!("[Asset Loading] Could not load config file. Following error returned: {}", e);
continue //skip this config file. TODO never gets to the error message at end of loop
},
};
//TODO make type case insensitive
let asset_was_loaded = match config.get_str("type").unwrap_or("".to_string()).as_str() {
"sprite sheet" => load_sprite_sheet(&config, &config_path, gpu, asset_db),
"audio clip" => load_audio_clip(&config, &config_path, asset_db),
_ => {
warn!("[Asset Loading] 'Type' key does not exist or value is not supported. Config File Path: {}",
config_path.to_str().unwrap());
false
},
};
//do some extra logging to help bring errors to people's attention.
if asset_was_loaded {
info!("[Asset Loading] Loaded asset relating to config file {}",
config_path.to_str().unwrap());
} else {
error!("[Asset Loading] Failed to load asset relating to config file {}. {}",
config_path.to_str().unwrap(),
"Please review previous warnings."
);
}
}
}
//make sure file is one of the right formats for configuration files
fn is_config_ext(file_path: &Path) -> bool {
match file_path.extension().and_then(OsStr::to_str) {
Some("yml") => true,
_ => false
}
}
//locates all config files under a given path recursively
fn find_config_files(path: &str) -> Vec<PathBuf> {
let mut config_file_paths = vec![];
for entry in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
if is_config_ext( entry.path() )
&& entry.path().file_stem().unwrap_or(OsStr::new("")) != "campaign" {
config_file_paths.push(entry.into_path());
}
}
config_file_paths
}
//utility function to create a coffee error since it's a bit of a pain.
fn make_coffee_err_from_str(msg: &str) -> coffee::Error |
//creates a task for loading a config file and it's resources
fn load_config_task(file_path: &PathBuf) -> Task<Config> {
//needed so closure below can capture
let path = file_path.clone();
Task::new(move || {
//coerce into string value or return error
let str_path = match path.to_str() {
Some(string) => string,
//Will be logged in the function that runs the task.
None => return Err(
make_coffee_err_from_str("Config path cannot be converted to string.")
),
};
//create the config struct and load in the given file, either returning a populated
// config file or a relevant error
let mut config_data = Config::default();
match config_data.merge(File::with_name(&str_path)) {
Ok(_) => Ok(config_data),
//Coerce err to an error type we can return.
//Will be logged in the function that runs the task.
Err(err) => Err( make_coffee_err_from_str( err.to_string().as_str() ) ),
}
})
}
//load sprite sheets
//TODO, maybe should make this return a task also?
fn load_sprite_sheet(config: &Config, config_path: &PathBuf,
gpu: &mut Gpu, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let rows = config.get_int("rows");
let columns = config.get_int("columns");
let animations = config.get_table("animations");
if file.is_err() || rows.is_err() || columns.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for sprite sheet type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = rows { warn!("{} {}", err_msg_head, err); }
if let Err(err) = columns { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//process the file path and asset name to the right types
// assume image path is given as relative to config path hence taking the parent as a starting point.
let image_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match image_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false //name is not UTF-8 compatible so abort
}
};
//try to load image
let image = match Image::load( image_path.clone() ).run(gpu) {
Ok(image) => image,
Err(err) => {
warn!("[Asset Loading] Could not load Image at {} related to config file {}. Following error returned: {}",
image_path.clone().to_str().unwrap_or("<error could not convert image path to str>"),
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
return false //load image failed.
}
};
//create sprite sheet, add animations, then add the new asset to the database
let mut spritesheet = SpriteSheet::new(
image,
rows.ok().expect("row convert error") as u16,
columns.ok().expect("column convert error") as u16,
);
if animations.is_ok() {
for (animation_name, tuple_list) in animations.ok().unwrap().iter() {
match tuple_list.clone().try_into::< Vec<(u16,u16)> >() {
Ok(sprite_pos_array) =>
//TODO might want to do additional checking of data.
// No error is thrown for having an extra value, regardless of whether it is an int or not.
// Error branch will happen if a string is in 1st or 2nd location or if a tuple is
// replaced by something else.
spritesheet.add_animation(animation_name.clone(), sprite_pos_array),
Err(err) => {
warn!("[Asset Loading] Animation {} does not follow form {} in config file {}. Following error returned: {}",
animation_name,
"[ [row_1, col_1],..., [row_n, col_n] ]",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
continue;
}
}
}
}
asset_db.add_asset(asset_name, AssetContainer::Spritesheet(spritesheet));
return true;
}
//load sound clips
//TODO, maybe should make this return a task also?
fn load_audio_clip(config: &Config, config_path: &PathBuf, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let category = config.get_str("category");
if file.is_err() || category.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for audio clip type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = category { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//TODO make case insensitive
let clip_category = match category.unwrap().as_str() {
"voice" => ClipCategory::Voice,
"music" => ClipCategory::Music,
"effects" => ClipCategory::Effects,
failed_category => {
warn!("[Asset Loading] Provided audio category '{}' is not a valid option. Related to config file {}.",
failed_category,
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
}
};
// assume audio path is given as relative to config path hence taking the parent as a starting point.
let audio_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match audio_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false; //name is not UTF-8 compatible so abort
}
};
let audio_clip = AudioClip::new(audio_path, clip_category);
asset_db.add_asset(asset_name, AssetContainer::AudioClip(audio_clip));
return true;
}
| {
coffee::Error::IO(
std::io::Error::new( std::io::ErrorKind::Other, msg )
)
} | identifier_body |
campaign_loader.rs | use std::path::{Path, PathBuf};
use std::ffi::OsStr;
//For image loading and hooking into the Task system
use coffee::{
load::Task,
graphics::Image,
};
use coffee::graphics::Gpu;
//For config file loading and parsing
use config::*;
//To locate all config files
extern crate walkdir;
use walkdir::WalkDir;
use super::{AssetDatabase, AssetContainer, SpriteSheet};
use super::audio::{ClipCategory, AudioClip};
//loads the metadata for each campaign so we can display the options
//pub fn load_all_campaign_metadata(asset_db: &mut AssetDatabase) {//-> Config {
//}
//loads all data for a given campaign
pub fn | (path: &str, gpu: &mut Gpu, asset_db: &mut AssetDatabase) {
//load all config files under the campain folder
let mut campaign_config_paths = find_config_files(path);
//for each config file, load it then load the associated asset
while let Some(config_path) = campaign_config_paths.pop() {
let config =
match load_config_task(&config_path).run(gpu) {
Ok(config) => config,
Err(e) => {
warn!("[Asset Loading] Could not load config file. Following error returned: {}", e);
continue //skip this config file. TODO never gets to the error message at end of loop
},
};
//TODO make type case insensitive
let asset_was_loaded = match config.get_str("type").unwrap_or("".to_string()).as_str() {
"sprite sheet" => load_sprite_sheet(&config, &config_path, gpu, asset_db),
"audio clip" => load_audio_clip(&config, &config_path, asset_db),
_ => {
warn!("[Asset Loading] 'Type' key does not exist or value is not supported. Config File Path: {}",
config_path.to_str().unwrap());
false
},
};
//do some extra logging to help bring errors to people's attention.
if asset_was_loaded {
info!("[Asset Loading] Loaded asset relating to config file {}",
config_path.to_str().unwrap());
} else {
error!("[Asset Loading] Failed to load asset relating to config file {}. {}",
config_path.to_str().unwrap(),
"Please review previous warnings."
);
}
}
}
//make sure file is one of the right formats for configuration files
fn is_config_ext(file_path: &Path) -> bool {
match file_path.extension().and_then(OsStr::to_str) {
Some("yml") => true,
_ => false
}
}
//locates all config files under a given path recursively
fn find_config_files(path: &str) -> Vec<PathBuf> {
let mut config_file_paths = vec![];
for entry in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
if is_config_ext( entry.path() )
&& entry.path().file_stem().unwrap_or(OsStr::new("")) != "campaign" {
config_file_paths.push(entry.into_path());
}
}
config_file_paths
}
//utility function to create a coffee error since it's a bit of a pain.
fn make_coffee_err_from_str(msg: &str) -> coffee::Error {
coffee::Error::IO(
std::io::Error::new( std::io::ErrorKind::Other, msg )
)
}
//creates a task for loading a config file and its resources
fn load_config_task(file_path: &PathBuf) -> Task<Config> {
//needed so closure below can capture
let path = file_path.clone();
Task::new(move || {
//coerce into string value or return error
let str_path = match path.to_str() {
Some(string) => string,
//Will be logged in the function that runs the task.
None => return Err(
make_coffee_err_from_str("Config path cannot be converted to string.")
),
};
//create the config struct and load in the given file, either returning a populated
// config file or a relevant error
let mut config_data = Config::default();
match config_data.merge(File::with_name(&str_path)) {
Ok(_) => Ok(config_data),
//Coerce err to an error type we can return.
//Will be logged in the function that runs the task.
Err(err) => Err( make_coffee_err_from_str( err.to_string().as_str() ) ),
}
})
}
//load sprite sheets
//TODO, maybe should make this return a task also?
fn load_sprite_sheet(config: &Config, config_path: &PathBuf,
gpu: &mut Gpu, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let rows = config.get_int("rows");
let columns = config.get_int("columns");
let animations = config.get_table("animations");
if file.is_err() || rows.is_err() || columns.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for sprite sheet type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = rows { warn!("{} {}", err_msg_head, err); }
if let Err(err) = columns { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//process the file path and asset name to the right types
// assume image path is given as relative to config path hence taking the parent as a starting point.
let image_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match image_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false //name is not UTF-8 compatible so abort
}
};
//try to load image
let image = match Image::load( image_path.clone() ).run(gpu) {
Ok(image) => image,
Err(err) => {
warn!("[Asset Loading] Could not load Image at {} related to config file {}. Following error returned: {}",
image_path.clone().to_str().unwrap_or("<error could not convert image path to str>"),
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
return false //load image failed.
}
};
//create sprite sheet, add animations, then add the new asset to the database
let mut spritesheet = SpriteSheet::new(
image,
rows.ok().expect("row convert error") as u16,
columns.ok().expect("column convert error") as u16,
);
if animations.is_ok() {
for (animation_name, tuple_list) in animations.ok().unwrap().iter() {
match tuple_list.clone().try_into::< Vec<(u16,u16)> >() {
Ok(sprite_pos_array) =>
//TODO might want to do additional checking of data.
// No error is thrown for having an extra value, regardless of whether it is an int or not.
// Error branch will happen if a string is in 1st or 2nd location or if a tuple is
// replaced by something else.
spritesheet.add_animation(animation_name.clone(), sprite_pos_array),
Err(err) => {
warn!("[Asset Loading] Animation {} does not follow form {} in config file {}. Following error returned: {}",
animation_name,
"[ [row_1, col_1],..., [row_n, col_n] ]",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
err,
);
continue;
}
}
}
}
asset_db.add_asset(asset_name, AssetContainer::Spritesheet(spritesheet));
return true;
}
//load sound clips
//TODO, maybe should make this return a task also?
fn load_audio_clip(config: &Config, config_path: &PathBuf, asset_db: &mut AssetDatabase) -> bool {
//pull data we need and validate
let file = config.get_str("file");
let category = config.get_str("category");
if file.is_err() || category.is_err() {
let err_msg_head = format!("{} {} {}. {}",
"[Asset Loading]",
"Could not find required config value for audio clip type in config file",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
"Error follows: ");
if let Err(err) = file { warn!("{} {}", err_msg_head, err); }
if let Err(err) = category { warn!("{} {}", err_msg_head, err); }
return false //config missing required values
}
//TODO make case insensitive
let clip_category = match category.unwrap().as_str() {
"voice" => ClipCategory::Voice,
"music" => ClipCategory::Music,
"effects" => ClipCategory::Effects,
failed_category => {
warn!("[Asset Loading] Provided audio category '{}' is not a valid option. Related to config file {}.",
failed_category,
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
}
};
// assume audio path is given as relative to config path hence taking the parent as a starting point.
let audio_path = match config_path.parent() {
Some(dir_path) => dir_path.join(file.ok().expect("File value is missing while loading.")),
//getting parent from path failed somehow. Shouldn't ever happen naturally.
None => {
warn!("{} {}",
"[Asset Loading] Parent missing from config path when processing",
config_path.to_str().unwrap_or("<error could not convert config path to str>"),
);
return false;
},
};
let asset_name = match audio_path.clone().into_os_string().into_string() {
Ok(name) => name,
Err(err) => {
warn!("[Asset Loading] {}",
err.into_string().unwrap_or("<Could not convert OsString err into string>".to_string()));
return false; //name is not UTF-8 compatible so abort
}
};
let audio_clip = AudioClip::new(audio_path, clip_category);
asset_db.add_asset(asset_name, AssetContainer::AudioClip(audio_clip));
return true;
}
| load_campaign_data | identifier_name |
mod.rs | use std::borrow::Cow;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc,
};
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use backoff::backoff::Backoff;
use futures::prelude::*;
use futures::task::AtomicWaker;
use log::{debug, error, warn};
use memchr::memchr;
use serde::Serialize;
use thiserror::Error;
use tokio_postgres::types::ToSql;
mod arena;
mod builder;
mod placeholders;
use crate::imageboard;
pub use builder::SearchBuilder;
pub use placeholders::PLACEHOLDERS;
#[derive(Debug, Error)]
pub enum Error {
#[error("invalid database pool size")]
InvalidPoolSize,
#[error("invalid database URL provided: {}",.0)]
InvalidDatabase(tokio_postgres::Error),
#[error("A fatal error occured when trying to archive posts")]
ArchiveError,
#[error("database connection error: {}",.0)]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {}",.0)]
DB(#[from] tokio_postgres::Error),
#[error("io error: {}",.0)]
IO(#[from] std::io::Error),
}
struct SearchInner {
db_pool: deadpool_postgres::Pool,
max_inflight_posts: usize,
fail_on_save_error: bool,
retries_on_save_error: usize,
failed: AtomicBool,
inflight_posts: AtomicUsize,
waker: Arc<AtomicWaker>,
flush_waker: Arc<AtomicWaker>,
close_waker: Arc<AtomicWaker>,
metrics: Arc<SearchMetrics>,
process_tx: tokio::sync::mpsc::UnboundedSender<Option<Vec<imageboard::Post>>>,
}
#[derive(Debug, Serialize)]
pub struct Metrics {
pub posts: u64,
pub avg_insert_time_ms: f64,
pub save_errors: u64,
}
#[derive(Default, Debug)]
struct SearchMetrics {
posts: AtomicU64,
queries: AtomicU64,
query_time_ns: AtomicU64,
save_errors: AtomicU64,
}
impl SearchMetrics {
pub fn incr_posts(&self, count: u64) {
self.posts.fetch_add(count, Ordering::Relaxed);
}
pub fn incr_query_time(&self, dur: Duration) {
self.queries.fetch_add(1, Ordering::Relaxed);
self.query_time_ns
.fetch_add(dur.as_nanos() as u64, Ordering::Relaxed);
}
pub fn incr_save_error(&self, count: u64) {
self.save_errors.fetch_add(count, Ordering::Relaxed);
}
}
#[derive(Clone)]
pub struct SearchMetricsProvider {
inner: Arc<SearchInner>,
}
impl super::MetricsProvider for SearchMetricsProvider {
fn name(&self) -> &'static str {
"pg_search"
}
fn metrics(
&self,
) -> Pin<Box<dyn std::future::Future<Output = Box<dyn erased_serde::Serialize + Send>> + Send>>
{
let queries = self.inner.metrics.queries.load(Ordering::Acquire) as f64;
let tt = self.inner.metrics.query_time_ns.load(Ordering::Acquire) as f64;
let m = Metrics {
posts: self.inner.metrics.posts.load(Ordering::Acquire),
avg_insert_time_ms: tt / queries / 1_000_000.,
save_errors: self.inner.metrics.save_errors.load(Ordering::Acquire),
};
let m: Box<dyn erased_serde::Serialize + Send> = Box::new(m);
futures::future::ready(m).boxed()
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Search {
inner: Arc<SearchInner>,
}
impl Search {
#[allow(dead_code)]
pub fn builder() -> SearchBuilder {
SearchBuilder::default()
}
pub fn metrics_provider(&self) -> impl super::MetricsProvider {
SearchMetricsProvider {
inner: self.inner.clone(),
}
}
}
impl SearchInner {
async fn save_posts(&self, mut item: Vec<imageboard::Post>) -> Result<(), Error> {
let client = self.db_pool.get().await?;
while item.len() > 0 {
let start = Instant::now();
// Postgres only supports a maximum of 2^15 params
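// (each post uses 22 placeholders, so 1280 posts * 22 = 28_160 parameters,
// safely under the 2^15 limit noted above)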
let (remain, posts) = if item.len() > 1280 {
let remain = item.split_off(1280);
(remain, item)
} else {
(vec![], item)
};
item = remain;
let rows = posts.len();
let query = "INSERT INTO
posts
(board, thread_no, post_no, subject, username, tripcode,
email, unique_id, since4_pass, country, filename,
image_hash, image_width, image_height, ts, comment, deleted,
ghost, sticky, spoiler, op, capcode) VALUES ";
let stmt = std::iter::once(Cow::Borrowed(query))
.chain((0..rows).map(|i| {
let z = i * 22;
Cow::Owned(
[
if i == 0 { "(" } else { "\n,(" },
PLACEHOLDERS[z], // board
",",
PLACEHOLDERS[z + 1], // thread_no
",",
PLACEHOLDERS[z + 2], // post_no
",to_tsvector(",
PLACEHOLDERS[z + 3], // subject
"),to_tsvector(",
PLACEHOLDERS[z + 4], // username
"),to_tsvector(",
PLACEHOLDERS[z + 5], // tripcode
"),to_tsvector(",
PLACEHOLDERS[z + 6], // email
"),",
PLACEHOLDERS[z + 7], // unique_id
",",
PLACEHOLDERS[z + 8], // since4_pass
",",
PLACEHOLDERS[z + 9], // country
",to_tsvector(REPLACE(",
PLACEHOLDERS[z + 10], // filename
",'.',' ')),",
PLACEHOLDERS[z + 11], // image_hash
",",
PLACEHOLDERS[z + 12], // image_width
",",
PLACEHOLDERS[z + 13], // image_height
",TO_TIMESTAMP(CAST(",
PLACEHOLDERS[z + 14], // ts
"::INT8 AS FLOAT8)),to_tsvector(",
PLACEHOLDERS[z + 15], // comment
"),",
PLACEHOLDERS[z + 16], // deleted
",",
PLACEHOLDERS[z + 17], // ghost
",",
PLACEHOLDERS[z + 18], // sticky
",",
PLACEHOLDERS[z + 19], // spoiler
",",
PLACEHOLDERS[z + 20], // op
",CAST(",
PLACEHOLDERS[z + 21], // capcode
"::INT8 AS INT4))",
]
.join(""),
)
}))
.chain(std::iter::once(Cow::Borrowed(
" ON CONFLICT (board, post_no) DO UPDATE SET
deleted = EXCLUDED.deleted,
sticky = EXCLUDED.sticky,
comment = COALESCE(EXCLUDED.comment, posts.comment);
",
)))
.collect::<String>();
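// For illustration (assuming `PLACEHOLDERS[n]` is the literal string "$n+1",
// e.g. `PLACEHOLDERS[0]` == "$1"), the VALUES fragment for row 0 expands to roughly:
//   ($1,$2,$3,to_tsvector($4),to_tsvector($5),to_tsvector($6),to_tsvector($7),
//    $8,$9,$10,to_tsvector(REPLACE($11,'.',' ')),$12,$13,$14,
//    TO_TIMESTAMP(CAST($15::INT8 AS FLOAT8)),to_tsvector($16),
//    $17,$18,$19,$20,$21,CAST($22::INT8 AS INT4))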
let i64_rena = arena::Arena::new(posts.len() * 4);
let str_rena = arena::Arena::new(posts.len() * 4);
let params = (0..posts.len())
.into_iter()
.map(|i| {
let values: Box<[&(dyn ToSql + Sync)]> = Box::new([
str_rena.alloc(Some(posts[i].board.to_string())),
i64_rena.alloc(Some(posts[i].thread_no() as i64)),
i64_rena.alloc(Some(posts[i].no as i64)),
&posts[i].sub,
&posts[i].name,
&posts[i].trip,
&posts[i].email,
&posts[i].id,
&posts[i].since4pass,
str_rena.alloc(posts[i].poster_country()),
str_rena.alloc(posts[i].media_filename()),
&posts[i].md5,
&posts[i].w,
&posts[i].h,
i64_rena.alloc(Some(posts[i].time as i64)),
str_rena.alloc(posts[i].comment().map(|x| str_sanitize(x))),
&posts[i].deleted,
&false,
&posts[i].sticky,
&posts[i].spoiler,
if posts[i].is_op() { &true } else { &false },
i64_rena.alloc(posts[i].short_capcode().chars().next().map(|c| c as i64)),
]);
values.into_vec()
})
.flatten()
.collect::<Vec<_>>();
let mut attempts = 0;
let mut backoff = backoff::ExponentialBackoff::default();
backoff.max_elapsed_time = None;
loop {
let r = client.execute(stmt.as_str(), ¶ms).await;
match r {
Ok(_) => break,
Err(err) => {
if attempts >= self.retries_on_save_error {
return Err(Error::from(err));
}
attempts += 1;
if let Some(b) = backoff.next_backoff() {
tokio::time::delay_for(b).await;
}
continue;
}
}
}
self.metrics.incr_posts(rows as u64);
self.metrics.incr_query_time(start.elapsed());
self.notify_post(rows);
// Since `values` contains references to data in the arenas,
// the values must be dropped before we drop the arenas
drop(params);
drop(i64_rena);
drop(str_rena);
}
Ok(())
}
async fn send_posts(self: Arc<Self>, item: Vec<imageboard::Post>) {
let board = item[0].board;
let thread_no = item[0].thread_no();
let post_no = item[0].no;
let sz = item.len();
match self.save_posts(item).await {
Ok(_) => debug!(
"Flushed {} posts to postgres. [First]: {}/{}/{}",
sz, board, thread_no, post_no
),
Err(err) => {
error!(
"Failed to save data for {} posts [First]: {}/{}/{}: {}",
sz, board, thread_no, post_no, err
);
if !self.fail_on_save_error {
warn!("Some posts were unable to be archived; however, the error isn't being treated as fatal. Some posts may be lost.")
}
self.metrics.incr_save_error(1);
self.failed.store(true, Ordering::SeqCst);
}
}
}
fn notify_post(&self, no_posts: usize) {
let old = self.inflight_posts.fetch_sub(no_posts, Ordering::AcqRel);
let curr = old - no_posts;
if curr < self.max_inflight_posts {
self.waker.wake();
}
if curr == 0 {
self.flush_waker.wake();
self.close_waker.wake();
}
}
fn is_ready(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts < self.max_inflight_posts
}
fn is_empty(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts == 0
}
fn has_failed(&self) -> bool {
return self.fail_on_save_error && self.failed.load(Ordering::Relaxed);
}
}
impl Sink<Vec<imageboard::Post>> for Search {
type Error = Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.waker.register(cx.waker());
match self.inner.is_ready() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Vec<imageboard::Post>) -> Result<(), Self::Error> {
if item.len() > 0 {
self.inner
.inflight_posts
.fetch_add(item.len(), Ordering::AcqRel);
self.inner.process_tx.send(Some(item)).unwrap();
}
Ok(())
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.flush_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
let _ = self.inner.process_tx.send(None);
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.close_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())), |
fn str_sanitize(input: String) -> String {
match memchr(0, input.as_bytes()) {
Some(_) => input.replace(char::from(0), ""),
None => input,
}
} | false => Poll::Pending,
}
}
} | random_line_split |
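// Hedged usage sketch (not from the source; construction details live in
// `builder.rs` and are assumed here): the archiver drives `Search` through
// the `futures::Sink` interface, roughly:
//
//     use futures::SinkExt;
//     let mut search: Search = /* built via SearchBuilder elsewhere */;
//     search.send(posts).await?;  // backpressure via poll_ready / max_inflight_posts
//     search.close().await?;      // resolves once all in-flight posts are flushed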
mod.rs | use std::borrow::Cow;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc,
};
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use backoff::backoff::Backoff;
use futures::prelude::*;
use futures::task::AtomicWaker;
use log::{debug, error, warn};
use memchr::memchr;
use serde::Serialize;
use thiserror::Error;
use tokio_postgres::types::ToSql;
mod arena;
mod builder;
mod placeholders;
use crate::imageboard;
pub use builder::SearchBuilder;
pub use placeholders::PLACEHOLDERS;
#[derive(Debug, Error)]
pub enum Error {
#[error("invalid database pool size")]
InvalidPoolSize,
#[error("invalid database URL provided: {}",.0)]
InvalidDatabase(tokio_postgres::Error),
#[error("A fatal error occured when trying to archive posts")]
ArchiveError,
#[error("database connection error: {}",.0)]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {}",.0)]
DB(#[from] tokio_postgres::Error),
#[error("io error: {}",.0)]
IO(#[from] std::io::Error),
}
struct SearchInner {
db_pool: deadpool_postgres::Pool,
max_inflight_posts: usize,
fail_on_save_error: bool,
retries_on_save_error: usize,
failed: AtomicBool,
inflight_posts: AtomicUsize,
waker: Arc<AtomicWaker>,
flush_waker: Arc<AtomicWaker>,
close_waker: Arc<AtomicWaker>,
metrics: Arc<SearchMetrics>,
process_tx: tokio::sync::mpsc::UnboundedSender<Option<Vec<imageboard::Post>>>,
}
#[derive(Debug, Serialize)]
pub struct Metrics {
pub posts: u64,
pub avg_insert_time_ms: f64,
pub save_errors: u64,
}
#[derive(Default, Debug)]
struct SearchMetrics {
posts: AtomicU64,
queries: AtomicU64,
query_time_ns: AtomicU64,
save_errors: AtomicU64,
}
impl SearchMetrics {
pub fn incr_posts(&self, count: u64) {
self.posts.fetch_add(count, Ordering::Relaxed);
}
pub fn incr_query_time(&self, dur: Duration) {
self.queries.fetch_add(1, Ordering::Relaxed);
self.query_time_ns
.fetch_add(dur.as_nanos() as u64, Ordering::Relaxed);
}
pub fn incr_save_error(&self, count: u64) {
self.save_errors.fetch_add(count, Ordering::Relaxed);
}
}
#[derive(Clone)]
pub struct SearchMetricsProvider {
inner: Arc<SearchInner>,
}
impl super::MetricsProvider for SearchMetricsProvider {
fn name(&self) -> &'static str {
"pg_search"
}
fn metrics(
&self,
) -> Pin<Box<dyn std::future::Future<Output = Box<dyn erased_serde::Serialize + Send>> + Send>>
{
let queries = self.inner.metrics.queries.load(Ordering::Acquire) as f64;
let tt = self.inner.metrics.query_time_ns.load(Ordering::Acquire) as f64;
let m = Metrics {
posts: self.inner.metrics.posts.load(Ordering::Acquire),
avg_insert_time_ms: tt / queries / 1_000_000.,
save_errors: self.inner.metrics.save_errors.load(Ordering::Acquire),
};
let m: Box<dyn erased_serde::Serialize + Send> = Box::new(m);
futures::future::ready(m).boxed()
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Search {
inner: Arc<SearchInner>,
}
impl Search {
#[allow(dead_code)]
pub fn builder() -> SearchBuilder {
SearchBuilder::default()
}
pub fn metrics_provider(&self) -> impl super::MetricsProvider {
SearchMetricsProvider {
inner: self.inner.clone(),
}
}
}
impl SearchInner {
async fn save_posts(&self, mut item: Vec<imageboard::Post>) -> Result<(), Error> {
let client = self.db_pool.get().await?;
while item.len() > 0 {
let start = Instant::now();
// Postgres only supports a maximum of 2^15 params
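// (each post uses 22 placeholders, so 1280 posts * 22 = 28_160 parameters,
// safely under the 2^15 limit noted above)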
let (remain, posts) = if item.len() > 1280 {
let remain = item.split_off(1280);
(remain, item)
} else {
(vec![], item)
};
item = remain;
let rows = posts.len();
let query = "INSERT INTO
posts
(board, thread_no, post_no, subject, username, tripcode,
email, unique_id, since4_pass, country, filename,
image_hash, image_width, image_height, ts, comment, deleted,
ghost, sticky, spoiler, op, capcode) VALUES ";
let stmt = std::iter::once(Cow::Borrowed(query))
.chain((0..rows).map(|i| {
let z = i * 22;
Cow::Owned(
[
if i == 0 { "(" } else { "\n,(" },
PLACEHOLDERS[z], // board
",",
PLACEHOLDERS[z + 1], // thread_no
",",
PLACEHOLDERS[z + 2], // post_no
",to_tsvector(",
PLACEHOLDERS[z + 3], // subject
"),to_tsvector(",
PLACEHOLDERS[z + 4], // username
"),to_tsvector(",
PLACEHOLDERS[z + 5], // tripcode
"),to_tsvector(",
PLACEHOLDERS[z + 6], // email
"),",
PLACEHOLDERS[z + 7], // unique_id
",",
PLACEHOLDERS[z + 8], // since4_pass
",",
PLACEHOLDERS[z + 9], // country
",to_tsvector(REPLACE(",
PLACEHOLDERS[z + 10], // filename
",'.',' ')),",
PLACEHOLDERS[z + 11], // image_hash
",",
PLACEHOLDERS[z + 12], // image_width
",",
PLACEHOLDERS[z + 13], // image_height
",TO_TIMESTAMP(CAST(",
PLACEHOLDERS[z + 14], // ts
"::INT8 AS FLOAT8)),to_tsvector(",
PLACEHOLDERS[z + 15], // comment
"),",
PLACEHOLDERS[z + 16], // deleted
",",
PLACEHOLDERS[z + 17], // ghost
",",
PLACEHOLDERS[z + 18], // sticky
",",
PLACEHOLDERS[z + 19], // spoiler
",",
PLACEHOLDERS[z + 20], // op
",CAST(",
PLACEHOLDERS[z + 21], // capcode
"::INT8 AS INT4))",
]
.join(""),
)
}))
.chain(std::iter::once(Cow::Borrowed(
" ON CONFLICT (board, post_no) DO UPDATE SET
deleted = EXCLUDED.deleted,
sticky = EXCLUDED.sticky,
comment = COALESCE(EXCLUDED.comment, posts.comment);
",
)))
.collect::<String>();
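// For illustration (assuming `PLACEHOLDERS[n]` is the literal string "$n+1",
// e.g. `PLACEHOLDERS[0]` == "$1"), the VALUES fragment for row 0 expands to roughly:
//   ($1,$2,$3,to_tsvector($4),to_tsvector($5),to_tsvector($6),to_tsvector($7),
//    $8,$9,$10,to_tsvector(REPLACE($11,'.',' ')),$12,$13,$14,
//    TO_TIMESTAMP(CAST($15::INT8 AS FLOAT8)),to_tsvector($16),
//    $17,$18,$19,$20,$21,CAST($22::INT8 AS INT4))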
let i64_rena = arena::Arena::new(posts.len() * 4);
let str_rena = arena::Arena::new(posts.len() * 4);
let params = (0..posts.len())
.into_iter()
.map(|i| {
let values: Box<[&(dyn ToSql + Sync)]> = Box::new([
str_rena.alloc(Some(posts[i].board.to_string())),
i64_rena.alloc(Some(posts[i].thread_no() as i64)),
i64_rena.alloc(Some(posts[i].no as i64)),
&posts[i].sub,
&posts[i].name,
&posts[i].trip,
&posts[i].email,
&posts[i].id,
&posts[i].since4pass,
str_rena.alloc(posts[i].poster_country()),
str_rena.alloc(posts[i].media_filename()),
&posts[i].md5,
&posts[i].w,
&posts[i].h,
i64_rena.alloc(Some(posts[i].time as i64)),
str_rena.alloc(posts[i].comment().map(|x| str_sanitize(x))),
&posts[i].deleted,
&false,
&posts[i].sticky,
&posts[i].spoiler,
if posts[i].is_op() { &true } else { &false },
i64_rena.alloc(posts[i].short_capcode().chars().next().map(|c| c as i64)),
]);
values.into_vec()
})
.flatten()
.collect::<Vec<_>>();
let mut attempts = 0;
let mut backoff = backoff::ExponentialBackoff::default();
backoff.max_elapsed_time = None;
loop {
                let r = client.execute(stmt.as_str(), &params).await;
match r {
Ok(_) => break,
Err(err) => {
if attempts >= self.retries_on_save_error {
return Err(Error::from(err));
}
attempts += 1;
if let Some(b) = backoff.next_backoff() {
tokio::time::delay_for(b).await;
}
continue;
}
}
}
self.metrics.incr_posts(rows as u64);
self.metrics.incr_query_time(start.elapsed());
self.notify_post(rows);
            // Since `values` contains references to data in the 'renas,
            // the values must be dropped before we drop the 'renas
drop(params);
drop(i64_rena);
drop(str_rena);
}
Ok(())
}
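    // Added sanity-check sketch (not part of the original module) for the
    // 1280-row batch size used in `save_posts` above: Postgres caps a single
    // statement at 2^15 = 32768 bind parameters and each post binds 22
    // values, so the hard ceiling would be floor(32768 / 22) = 1489 rows;
    // 1280 rows (28160 params) leaves comfortable headroom.
    #[allow(dead_code)]
    fn max_batch_rows_sketch() -> usize {
        (1usize << 15) / 22 // = 1489; the code batches a conservative 1280
    }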
async fn send_posts(self: Arc<Self>, item: Vec<imageboard::Post>) {
let board = item[0].board;
let thread_no = item[0].thread_no();
let post_no = item[0].no;
let sz = item.len();
match self.save_posts(item).await {
Ok(_) => debug!(
"Flushed {} posts to postgres. [First]: {}/{}/{}",
sz, board, thread_no, post_no
),
Err(err) => {
error!(
"Failed to save data for {} posts [First]: {}/{}/{}: {}",
sz, board, thread_no, post_no, err
);
                if !self.fail_on_save_error {
                    warn!("Some posts could not be archived, but the error isn't being treated as fatal. Some posts may be lost.")
}
self.metrics.incr_save_error(1);
self.failed.store(true, Ordering::SeqCst);
}
}
}
fn notify_post(&self, no_posts: usize) {
let old = self.inflight_posts.fetch_sub(no_posts, Ordering::AcqRel);
let curr = old - no_posts;
if curr < self.max_inflight_posts {
self.waker.wake();
}
if curr == 0 {
self.flush_waker.wake();
self.close_waker.wake();
}
}
fn is_ready(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts < self.max_inflight_posts
}
fn is_empty(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts == 0
}
fn has_failed(&self) -> bool {
return self.fail_on_save_error && self.failed.load(Ordering::Relaxed);
}
}
impl Sink<Vec<imageboard::Post>> for Search {
type Error = Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.waker.register(cx.waker());
match self.inner.is_ready() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn | (self: Pin<&mut Self>, item: Vec<imageboard::Post>) -> Result<(), Self::Error> {
if item.len() > 0 {
self.inner
.inflight_posts
.fetch_add(item.len(), Ordering::AcqRel);
self.inner.process_tx.send(Some(item)).unwrap();
}
Ok(())
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.flush_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
let _ = self.inner.process_tx.send(None);
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.close_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
}
fn str_sanitize(input: String) -> String {
match memchr(0, input.as_bytes()) {
Some(_) => input.replace(char::from(0), ""),
None => input,
}
}
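// Added usage sketch for `str_sanitize`: Postgres TEXT values reject NUL
// bytes, so embedded `\0` characters are stripped before insertion; `memchr`
// keeps the common NUL-free case allocation-free by returning the input
// untouched.
#[cfg(test)]
mod str_sanitize_sketch {
    use super::str_sanitize;

    #[test]
    fn strips_nul_bytes_only_when_present() {
        assert_eq!(str_sanitize("clean".to_string()), "clean");
        assert_eq!(str_sanitize("a\0b".to_string()), "ab");
    }
}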
| start_send | identifier_name |
mod.rs | use std::borrow::Cow;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc,
};
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use backoff::backoff::Backoff;
use futures::prelude::*;
use futures::task::AtomicWaker;
use log::{debug, error, warn};
use memchr::memchr;
use serde::Serialize;
use thiserror::Error;
use tokio_postgres::types::ToSql;
mod arena;
mod builder;
mod placeholders;
use crate::imageboard;
pub use builder::SearchBuilder;
pub use placeholders::PLACEHOLDERS;
#[derive(Debug, Error)]
pub enum Error {
#[error("invalid database pool size")]
InvalidPoolSize,
#[error("invalid database URL provided: {}",.0)]
InvalidDatabase(tokio_postgres::Error),
#[error("A fatal error occured when trying to archive posts")]
ArchiveError,
#[error("database connection error: {}",.0)]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {}",.0)]
DB(#[from] tokio_postgres::Error),
#[error("io error: {}",.0)]
IO(#[from] std::io::Error),
}
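// Added illustration (not part of the original module): the `#[from]`
// attributes above derive `From` impls, so the `?` operator in the functions
// below converts library errors into this enum automatically.
#[allow(dead_code)]
fn from_conversion_sketch(e: std::io::Error) -> Error {
    Error::from(e) // routed through the thiserror-generated `From` impl
}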
struct SearchInner {
db_pool: deadpool_postgres::Pool,
max_inflight_posts: usize,
fail_on_save_error: bool,
retries_on_save_error: usize,
failed: AtomicBool,
inflight_posts: AtomicUsize,
waker: Arc<AtomicWaker>,
flush_waker: Arc<AtomicWaker>,
close_waker: Arc<AtomicWaker>,
metrics: Arc<SearchMetrics>,
process_tx: tokio::sync::mpsc::UnboundedSender<Option<Vec<imageboard::Post>>>,
}
#[derive(Debug, Serialize)]
pub struct Metrics {
pub posts: u64,
pub avg_insert_time_ms: f64,
pub save_errors: u64,
}
#[derive(Default, Debug)]
struct SearchMetrics {
posts: AtomicU64,
queries: AtomicU64,
query_time_ns: AtomicU64,
save_errors: AtomicU64,
}
impl SearchMetrics {
pub fn incr_posts(&self, count: u64) {
self.posts.fetch_add(count, Ordering::Relaxed);
}
pub fn incr_query_time(&self, dur: Duration) {
self.queries.fetch_add(1, Ordering::Relaxed);
self.query_time_ns
.fetch_add(dur.as_nanos() as u64, Ordering::Relaxed);
}
pub fn incr_save_error(&self, count: u64) {
self.save_errors.fetch_add(count, Ordering::Relaxed);
}
}
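// Added sketch of how the counters above combine into the reported average:
// `query_time_ns` is cumulative and `queries` counts executed batches, so the
// mean batch latency in milliseconds is total_ns / batches / 1e6. (Helper
// name is my own; illustration only.)
#[allow(dead_code)]
fn avg_insert_ms_sketch(total_ns: u64, batches: u64) -> f64 {
    if batches == 0 {
        return 0.0; // nothing recorded yet; avoid dividing by zero
    }
    total_ns as f64 / batches as f64 / 1_000_000.0
}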
#[derive(Clone)]
pub struct SearchMetricsProvider {
inner: Arc<SearchInner>,
}
impl super::MetricsProvider for SearchMetricsProvider {
fn name(&self) -> &'static str {
"pg_search"
}
fn metrics(
&self,
) -> Pin<Box<dyn std::future::Future<Output = Box<dyn erased_serde::Serialize + Send>> + Send>>
{
let queries = self.inner.metrics.queries.load(Ordering::Acquire) as f64;
let tt = self.inner.metrics.query_time_ns.load(Ordering::Acquire) as f64;
let m = Metrics {
posts: self.inner.metrics.posts.load(Ordering::Acquire),
            avg_insert_time_ms: tt / queries / 1_000_000., // total ns / query count, converted to ms
save_errors: self.inner.metrics.save_errors.load(Ordering::Acquire),
};
let m: Box<dyn erased_serde::Serialize + Send> = Box::new(m);
futures::future::ready(m).boxed()
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Search {
inner: Arc<SearchInner>,
}
impl Search {
#[allow(dead_code)]
pub fn builder() -> SearchBuilder {
SearchBuilder::default()
}
pub fn metrics_provider(&self) -> impl super::MetricsProvider {
SearchMetricsProvider {
inner: self.inner.clone(),
}
}
}
impl SearchInner {
async fn save_posts(&self, mut item: Vec<imageboard::Post>) -> Result<(), Error> {
let client = self.db_pool.get().await?;
while item.len() > 0 {
let start = Instant::now();
// Postgres only supports a maximum of 2^15 params
let (remain, posts) = if item.len() > 1280 {
let remain = item.split_off(1280);
(remain, item)
} else {
(vec![], item)
};
item = remain;
let rows = posts.len();
let query = "INSERT INTO
posts
(board, thread_no, post_no, subject, username, tripcode,
email, unique_id, since4_pass, country, filename,
image_hash, image_width, image_height, ts, comment, deleted,
ghost, sticky, spoiler, op, capcode) VALUES ";
let stmt = std::iter::once(Cow::Borrowed(query))
.chain((0..rows).map(|i| {
let z = i * 22;
Cow::Owned(
[
if i == 0 { "(" } else { "\n,(" },
PLACEHOLDERS[z], // board
",",
PLACEHOLDERS[z + 1], // thread_no
",",
PLACEHOLDERS[z + 2], // post_no
",to_tsvector(",
PLACEHOLDERS[z + 3], // subject
"),to_tsvector(",
PLACEHOLDERS[z + 4], // username
"),to_tsvector(",
PLACEHOLDERS[z + 5], // tripcode
"),to_tsvector(",
PLACEHOLDERS[z + 6], // email
"),",
PLACEHOLDERS[z + 7], // unique_id
",",
PLACEHOLDERS[z + 8], // since4_pass
",",
PLACEHOLDERS[z + 9], // country
",to_tsvector(REPLACE(",
PLACEHOLDERS[z + 10], // filename
",'.',' ')),",
PLACEHOLDERS[z + 11], // image_hash
",",
PLACEHOLDERS[z + 12], // image_width
",",
PLACEHOLDERS[z + 13], // image_height
",TO_TIMESTAMP(CAST(",
PLACEHOLDERS[z + 14], // ts
"::INT8 AS FLOAT8)),to_tsvector(",
PLACEHOLDERS[z + 15], // comment
"),",
PLACEHOLDERS[z + 16], // deleted
",",
PLACEHOLDERS[z + 17], // ghost
",",
PLACEHOLDERS[z + 18], // sticky
",",
PLACEHOLDERS[z + 19], // spoiler
",",
PLACEHOLDERS[z + 20], // op
",CAST(",
PLACEHOLDERS[z + 21], // capcode
"::INT8 AS INT4))",
]
.join(""),
)
}))
.chain(std::iter::once(Cow::Borrowed(
" ON CONFLICT (board, post_no) DO UPDATE SET
deleted = EXCLUDED.deleted,
sticky = EXCLUDED.sticky,
comment = COALESCE(EXCLUDED.comment, posts.comment);
",
)))
.collect::<String>();
let i64_rena = arena::Arena::new(posts.len() * 4);
let str_rena = arena::Arena::new(posts.len() * 4);
            let params = (0..posts.len())
.map(|i| {
let values: Box<[&(dyn ToSql + Sync)]> = Box::new([
str_rena.alloc(Some(posts[i].board.to_string())),
i64_rena.alloc(Some(posts[i].thread_no() as i64)),
i64_rena.alloc(Some(posts[i].no as i64)),
&posts[i].sub,
&posts[i].name,
&posts[i].trip,
&posts[i].email,
&posts[i].id,
&posts[i].since4pass,
str_rena.alloc(posts[i].poster_country()),
str_rena.alloc(posts[i].media_filename()),
&posts[i].md5,
&posts[i].w,
&posts[i].h,
i64_rena.alloc(Some(posts[i].time as i64)),
str_rena.alloc(posts[i].comment().map(|x| str_sanitize(x))),
&posts[i].deleted,
&false,
&posts[i].sticky,
&posts[i].spoiler,
if posts[i].is_op() { &true } else { &false },
i64_rena.alloc(posts[i].short_capcode().chars().next().map(|c| c as i64)),
]);
values.into_vec()
})
.flatten()
.collect::<Vec<_>>();
let mut attempts = 0;
let mut backoff = backoff::ExponentialBackoff::default();
backoff.max_elapsed_time = None;
loop {
                let r = client.execute(stmt.as_str(), &params).await;
match r {
Ok(_) => break,
Err(err) => {
if attempts >= self.retries_on_save_error {
return Err(Error::from(err));
}
attempts += 1;
if let Some(b) = backoff.next_backoff() {
tokio::time::delay_for(b).await;
}
continue;
}
}
}
self.metrics.incr_posts(rows as u64);
self.metrics.incr_query_time(start.elapsed());
self.notify_post(rows);
            // Since `values` contains references to data in the 'renas,
            // the values must be dropped before we drop the 'renas
drop(params);
drop(i64_rena);
drop(str_rena);
}
Ok(())
}
async fn send_posts(self: Arc<Self>, item: Vec<imageboard::Post>) {
let board = item[0].board;
let thread_no = item[0].thread_no();
let post_no = item[0].no;
let sz = item.len();
match self.save_posts(item).await {
Ok(_) => debug!(
"Flushed {} posts to postgres. [First]: {}/{}/{}",
sz, board, thread_no, post_no
),
Err(err) => {
error!(
"Failed to save data for {} posts [First]: {}/{}/{}: {}",
sz, board, thread_no, post_no, err
);
                if !self.fail_on_save_error {
                    warn!("Some posts could not be archived, but the error isn't being treated as fatal. Some posts may be lost.")
}
self.metrics.incr_save_error(1);
self.failed.store(true, Ordering::SeqCst);
}
}
}
fn notify_post(&self, no_posts: usize) {
let old = self.inflight_posts.fetch_sub(no_posts, Ordering::AcqRel);
let curr = old - no_posts;
if curr < self.max_inflight_posts {
self.waker.wake();
}
if curr == 0 {
self.flush_waker.wake();
self.close_waker.wake();
}
}
fn is_ready(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts < self.max_inflight_posts
}
fn is_empty(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts == 0
}
fn has_failed(&self) -> bool {
return self.fail_on_save_error && self.failed.load(Ordering::Relaxed);
}
}
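// Standalone sketch (added; names here are hypothetical) of the wake-on-drain
// backpressure used by `notify_post` and `poll_ready` above: the sink parks
// itself by registering a waker while the in-flight count is at the limit,
// and the worker wakes it once a completed batch drops the count back under
// the limit.
#[cfg(test)]
mod backpressure_sketch {
    use futures::task::AtomicWaker;
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct Gate {
        inflight: AtomicUsize,
        limit: usize,
        waker: AtomicWaker,
    }

    impl Gate {
        /// Producer side: is there capacity for more work?
        fn is_ready(&self) -> bool {
            self.inflight.load(Ordering::Acquire) < self.limit
        }

        /// Consumer side: mark `n` items done and wake a parked producer.
        fn complete(&self, n: usize) {
            let old = self.inflight.fetch_sub(n, Ordering::AcqRel);
            if old - n < self.limit {
                self.waker.wake();
            }
        }
    }

    #[test]
    fn completing_work_frees_capacity() {
        let gate = Gate {
            inflight: AtomicUsize::new(2),
            limit: 2,
            waker: AtomicWaker::new(),
        };
        assert!(!gate.is_ready());
        gate.complete(1);
        assert!(gate.is_ready());
    }
}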
impl Sink<Vec<imageboard::Post>> for Search {
type Error = Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.waker.register(cx.waker());
match self.inner.is_ready() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Vec<imageboard::Post>) -> Result<(), Self::Error> {
if item.len() > 0 {
self.inner
.inflight_posts
.fetch_add(item.len(), Ordering::AcqRel);
self.inner.process_tx.send(Some(item)).unwrap();
}
Ok(())
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() |
self.inner.flush_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
let _ = self.inner.process_tx.send(None);
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.close_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
}
fn str_sanitize(input: String) -> String {
match memchr(0, input.as_bytes()) {
Some(_) => input.replace(char::from(0), ""),
None => input,
}
}
| {
return Poll::Ready(Err(Error::ArchiveError));
} | conditional_block |
mod.rs | use std::borrow::Cow;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc,
};
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use backoff::backoff::Backoff;
use futures::prelude::*;
use futures::task::AtomicWaker;
use log::{debug, error, warn};
use memchr::memchr;
use serde::Serialize;
use thiserror::Error;
use tokio_postgres::types::ToSql;
mod arena;
mod builder;
mod placeholders;
use crate::imageboard;
pub use builder::SearchBuilder;
pub use placeholders::PLACEHOLDERS;
#[derive(Debug, Error)]
pub enum Error {
#[error("invalid database pool size")]
InvalidPoolSize,
#[error("invalid database URL provided: {}",.0)]
InvalidDatabase(tokio_postgres::Error),
#[error("A fatal error occured when trying to archive posts")]
ArchiveError,
#[error("database connection error: {}",.0)]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {}",.0)]
DB(#[from] tokio_postgres::Error),
#[error("io error: {}",.0)]
IO(#[from] std::io::Error),
}
struct SearchInner {
db_pool: deadpool_postgres::Pool,
max_inflight_posts: usize,
fail_on_save_error: bool,
retries_on_save_error: usize,
failed: AtomicBool,
inflight_posts: AtomicUsize,
waker: Arc<AtomicWaker>,
flush_waker: Arc<AtomicWaker>,
close_waker: Arc<AtomicWaker>,
metrics: Arc<SearchMetrics>,
process_tx: tokio::sync::mpsc::UnboundedSender<Option<Vec<imageboard::Post>>>,
}
#[derive(Debug, Serialize)]
pub struct Metrics {
pub posts: u64,
pub avg_insert_time_ms: f64,
pub save_errors: u64,
}
#[derive(Default, Debug)]
struct SearchMetrics {
posts: AtomicU64,
queries: AtomicU64,
query_time_ns: AtomicU64,
save_errors: AtomicU64,
}
impl SearchMetrics {
pub fn incr_posts(&self, count: u64) {
self.posts.fetch_add(count, Ordering::Relaxed);
}
pub fn incr_query_time(&self, dur: Duration) {
self.queries.fetch_add(1, Ordering::Relaxed);
self.query_time_ns
.fetch_add(dur.as_nanos() as u64, Ordering::Relaxed);
}
pub fn incr_save_error(&self, count: u64) {
self.save_errors.fetch_add(count, Ordering::Relaxed);
}
}
#[derive(Clone)]
pub struct SearchMetricsProvider {
inner: Arc<SearchInner>,
}
impl super::MetricsProvider for SearchMetricsProvider {
fn name(&self) -> &'static str {
"pg_search"
}
fn metrics(
&self,
) -> Pin<Box<dyn std::future::Future<Output = Box<dyn erased_serde::Serialize + Send>> + Send>>
{
let queries = self.inner.metrics.queries.load(Ordering::Acquire) as f64;
let tt = self.inner.metrics.query_time_ns.load(Ordering::Acquire) as f64;
let m = Metrics {
posts: self.inner.metrics.posts.load(Ordering::Acquire),
            avg_insert_time_ms: tt / queries / 1_000_000., // total ns / query count, converted to ms
save_errors: self.inner.metrics.save_errors.load(Ordering::Acquire),
};
let m: Box<dyn erased_serde::Serialize + Send> = Box::new(m);
futures::future::ready(m).boxed()
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Search {
inner: Arc<SearchInner>,
}
impl Search {
#[allow(dead_code)]
pub fn builder() -> SearchBuilder {
SearchBuilder::default()
}
pub fn metrics_provider(&self) -> impl super::MetricsProvider {
SearchMetricsProvider {
inner: self.inner.clone(),
}
}
}
impl SearchInner {
async fn save_posts(&self, mut item: Vec<imageboard::Post>) -> Result<(), Error> {
let client = self.db_pool.get().await?;
while item.len() > 0 {
let start = Instant::now();
// Postgres only supports a maximum of 2^15 params
let (remain, posts) = if item.len() > 1280 {
let remain = item.split_off(1280);
(remain, item)
} else {
(vec![], item)
};
item = remain;
let rows = posts.len();
let query = "INSERT INTO
posts
(board, thread_no, post_no, subject, username, tripcode,
email, unique_id, since4_pass, country, filename,
image_hash, image_width, image_height, ts, comment, deleted,
ghost, sticky, spoiler, op, capcode) VALUES ";
let stmt = std::iter::once(Cow::Borrowed(query))
.chain((0..rows).map(|i| {
let z = i * 22;
Cow::Owned(
[
if i == 0 { "(" } else { "\n,(" },
PLACEHOLDERS[z], // board
",",
PLACEHOLDERS[z + 1], // thread_no
",",
PLACEHOLDERS[z + 2], // post_no
",to_tsvector(",
PLACEHOLDERS[z + 3], // subject
"),to_tsvector(",
PLACEHOLDERS[z + 4], // username
"),to_tsvector(",
PLACEHOLDERS[z + 5], // tripcode
"),to_tsvector(",
PLACEHOLDERS[z + 6], // email
"),",
PLACEHOLDERS[z + 7], // unique_id
",",
PLACEHOLDERS[z + 8], // since4_pass
",",
PLACEHOLDERS[z + 9], // country
",to_tsvector(REPLACE(",
PLACEHOLDERS[z + 10], // filename
",'.',' ')),",
PLACEHOLDERS[z + 11], // image_hash
",",
PLACEHOLDERS[z + 12], // image_width
",",
PLACEHOLDERS[z + 13], // image_height
",TO_TIMESTAMP(CAST(",
PLACEHOLDERS[z + 14], // ts
"::INT8 AS FLOAT8)),to_tsvector(",
PLACEHOLDERS[z + 15], // comment
"),",
PLACEHOLDERS[z + 16], // deleted
",",
PLACEHOLDERS[z + 17], // ghost
",",
PLACEHOLDERS[z + 18], // sticky
",",
PLACEHOLDERS[z + 19], // spoiler
",",
PLACEHOLDERS[z + 20], // op
",CAST(",
PLACEHOLDERS[z + 21], // capcode
"::INT8 AS INT4))",
]
.join(""),
)
}))
.chain(std::iter::once(Cow::Borrowed(
" ON CONFLICT (board, post_no) DO UPDATE SET
deleted = EXCLUDED.deleted,
sticky = EXCLUDED.sticky,
comment = COALESCE(EXCLUDED.comment, posts.comment);
",
)))
.collect::<String>();
let i64_rena = arena::Arena::new(posts.len() * 4);
let str_rena = arena::Arena::new(posts.len() * 4);
            let params = (0..posts.len())
.map(|i| {
let values: Box<[&(dyn ToSql + Sync)]> = Box::new([
str_rena.alloc(Some(posts[i].board.to_string())),
i64_rena.alloc(Some(posts[i].thread_no() as i64)),
i64_rena.alloc(Some(posts[i].no as i64)),
&posts[i].sub,
&posts[i].name,
&posts[i].trip,
&posts[i].email,
&posts[i].id,
&posts[i].since4pass,
str_rena.alloc(posts[i].poster_country()),
str_rena.alloc(posts[i].media_filename()),
&posts[i].md5,
&posts[i].w,
&posts[i].h,
i64_rena.alloc(Some(posts[i].time as i64)),
str_rena.alloc(posts[i].comment().map(|x| str_sanitize(x))),
&posts[i].deleted,
&false,
&posts[i].sticky,
&posts[i].spoiler,
if posts[i].is_op() { &true } else { &false },
i64_rena.alloc(posts[i].short_capcode().chars().next().map(|c| c as i64)),
]);
values.into_vec()
})
.flatten()
.collect::<Vec<_>>();
let mut attempts = 0;
let mut backoff = backoff::ExponentialBackoff::default();
backoff.max_elapsed_time = None;
loop {
                let r = client.execute(stmt.as_str(), &params).await;
match r {
Ok(_) => break,
Err(err) => {
if attempts >= self.retries_on_save_error {
return Err(Error::from(err));
}
attempts += 1;
if let Some(b) = backoff.next_backoff() {
tokio::time::delay_for(b).await;
}
continue;
}
}
}
self.metrics.incr_posts(rows as u64);
self.metrics.incr_query_time(start.elapsed());
self.notify_post(rows);
            // Since `values` contains references to data in the 'renas,
            // the values must be dropped before we drop the 'renas
drop(params);
drop(i64_rena);
drop(str_rena);
}
Ok(())
}
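    // Added illustration of the drop-order constraint noted at the end of
    // `save_posts`: `params` borrows from the arenas, so it must be dropped
    // first. A safe-Rust analogue of the same rule:
    #[allow(dead_code)]
    fn drop_order_sketch() {
        let arena: Vec<i64> = vec![1, 2, 3];
        let params: Vec<&i64> = arena.iter().collect();
        drop(params); // the borrows end first...
        drop(arena); // ...then the owning storage goes away
    }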
async fn send_posts(self: Arc<Self>, item: Vec<imageboard::Post>) {
let board = item[0].board;
let thread_no = item[0].thread_no();
let post_no = item[0].no;
let sz = item.len();
match self.save_posts(item).await {
Ok(_) => debug!(
"Flushed {} posts to postgres. [First]: {}/{}/{}",
sz, board, thread_no, post_no
),
Err(err) => {
error!(
"Failed to save data for {} posts [First]: {}/{}/{}: {}",
sz, board, thread_no, post_no, err
);
                if !self.fail_on_save_error {
                    warn!("Some posts could not be archived, but the error isn't being treated as fatal. Some posts may be lost.")
}
self.metrics.incr_save_error(1);
self.failed.store(true, Ordering::SeqCst);
}
}
}
fn notify_post(&self, no_posts: usize) {
let old = self.inflight_posts.fetch_sub(no_posts, Ordering::AcqRel);
let curr = old - no_posts;
if curr < self.max_inflight_posts {
self.waker.wake();
}
if curr == 0 {
self.flush_waker.wake();
self.close_waker.wake();
}
}
fn is_ready(&self) -> bool |
fn is_empty(&self) -> bool {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts == 0
}
fn has_failed(&self) -> bool {
return self.fail_on_save_error && self.failed.load(Ordering::Relaxed);
}
}
impl Sink<Vec<imageboard::Post>> for Search {
type Error = Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.waker.register(cx.waker());
match self.inner.is_ready() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Vec<imageboard::Post>) -> Result<(), Self::Error> {
if item.len() > 0 {
self.inner
.inflight_posts
.fetch_add(item.len(), Ordering::AcqRel);
self.inner.process_tx.send(Some(item)).unwrap();
}
Ok(())
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.flush_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
let _ = self.inner.process_tx.send(None);
if self.inner.has_failed() {
return Poll::Ready(Err(Error::ArchiveError));
}
self.inner.close_waker.register(cx.waker());
match self.inner.is_empty() {
true => Poll::Ready(Ok(())),
false => Poll::Pending,
}
}
}
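// Added note: `poll_close` differs from `poll_flush` only in first sending a
// `None` sentinel through `process_tx`, which tells the background worker
// that no further batches will arrive before the drain completes.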
fn str_sanitize(input: String) -> String {
match memchr(0, input.as_bytes()) {
Some(_) => input.replace(char::from(0), ""),
None => input,
}
}
| {
let posts = self.inflight_posts.load(Ordering::Acquire);
posts < self.max_inflight_posts
} | identifier_body |
metadata.rs | from a cargo workspace root
fn generate_lockfile(&self, crate_root_dir: &Utf8Path) -> Result<Lockfile> {
let lockfile_path = crate_root_dir.join("Cargo.lock");
// Generate lockfile
let output = std::process::Command::new(&self.cargo_bin_path)
.arg("generate-lockfile")
.current_dir(crate_root_dir)
.output()
.with_context(|| format!("Generating lockfile in {}", crate_root_dir))?;
    if !output.status.success() {
anyhow::bail!(
"Failed to generate lockfile in {}: {}",
crate_root_dir,
String::from_utf8_lossy(&output.stderr)
);
}
// Load lockfile contents
Lockfile::load(&lockfile_path)
.with_context(|| format!("Failed to load lockfile: {}", lockfile_path))
}
}
/// A struct containing all metadata about a project with which to plan generated output files.
#[derive(Debug, Clone)]
pub struct RazeMetadata {
// `cargo metadata` output of the current project
pub metadata: Metadata,
// The absolute path to the current project's cargo workspace root. Note that the workspace
// root in `metadata` will be inside of a temporary directory. For details see:
// https://doc.rust-lang.org/cargo/reference/workspaces.html#root-package
pub cargo_workspace_root: Utf8PathBuf,
// The metadata of a lockfile that was generated as a result of fetching metadata
pub lockfile: Option<Lockfile>,
// A map of all known crates with checksums. Use `checksums_for` to access data from this map.
pub checksums: HashMap<String, String>,
// A map of crates to their enabled general and per-platform features.
pub features: BTreeMap<PackageId, Features>,
}
impl RazeMetadata {
/// Get the checksum of a crate using a unique formatter.
pub fn checksum_for(&self, name: &str, version: &str) -> Option<&String> {
self.checksums.get(&package_ident(name, version))
}
}
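// Added usage sketch: the checksum map is keyed by `package_ident(name,
// version)`, so lookups must go through `checksum_for` (or use the same
// formatter). The crate name and version below are hypothetical.
#[allow(dead_code)]
fn checksum_lookup_sketch(meta: &RazeMetadata) -> Option<String> {
    meta.checksum_for("serde", "1.0.130").cloned()
}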
/// Create a symlink file on unix systems
#[cfg(target_family = "unix")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::unix::fs::symlink(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
/// Create a symlink file on windows systems
#[cfg(target_family = "windows")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::windows::fs::symlink_file(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
/// A workspace metadata fetcher that uses the Cargo commands to gather information about a Cargo
/// project and its transitive dependencies for planning and rendering of Bazel BUILD files.
pub struct RazeMetadataFetcher {
registry_url: Url,
index_url: Url,
metadata_fetcher: Box<dyn MetadataFetcher>,
lockfile_generator: Box<dyn LockfileGenerator>,
settings: Option<RazeSettings>,
}
impl RazeMetadataFetcher {
pub fn new<P: Into<Utf8PathBuf>>(
cargo_bin_path: P,
registry_url: Url,
index_url: Url,
settings: Option<RazeSettings>,
) -> RazeMetadataFetcher {
let cargo_bin_pathbuf: Utf8PathBuf = cargo_bin_path.into();
RazeMetadataFetcher {
registry_url,
index_url,
metadata_fetcher: Box::new(CargoMetadataFetcher {
cargo_bin_path: cargo_bin_pathbuf.clone(),
}),
lockfile_generator: Box::new(CargoLockfileGenerator {
cargo_bin_path: cargo_bin_pathbuf,
}),
settings,
}
}
pub fn new_with_settings(settings: Option<RazeSettings>) -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
settings,
)
}
/// Reassign the [`crate::metadata::MetadataFetcher`] associated with the Raze Metadata Fetcher
pub fn set_metadata_fetcher(&mut self, fetcher: Box<dyn MetadataFetcher>) {
self.metadata_fetcher = fetcher;
}
/// Reassign the [`crate::metadata::LockfileGenerator`] associated with the current Fetcher
pub fn set_lockfile_generator(&mut self, generator: Box<dyn LockfileGenerator>) {
self.lockfile_generator = generator;
}
/// Symlinks the source code of all workspace members into the temp workspace
fn link_src_to_workspace(&self, no_deps_metadata: &Metadata, temp_dir: &Utf8Path) -> Result<()> {
let crate_member_id_re = match consts::OS {
"windows" => Regex::new(r".+\(path\+file:///(.+)\)")?,
_ => Regex::new(r".+\(path\+file://(.+)\)")?,
};
for member in no_deps_metadata.workspace_members.iter() {
// Get a path to the workspace member directory
let workspace_member_directory = {
let crate_member_id_match = crate_member_id_re
.captures(&member.repr)
.and_then(|cap| cap.get(1));
if crate_member_id_match.is_none() {
continue;
}
// UNWRAP: guarded above
Utf8PathBuf::from(crate_member_id_match.unwrap().as_str())
};
// Sanity check: The assumption is that any crate with an `id` that matches
// the regex pattern above should contain a Cargo.toml file with which we
// can use to infer the existence of libraries from relative paths such as
// `src/lib.rs` and `src/main.rs`.
let toml_path = workspace_member_directory.join("Cargo.toml");
      if !toml_path.exists() {
return Err(anyhow!(format!(
"The regex pattern `{}` found a path that did not contain a Cargo.toml file: `{}`",
crate_member_id_re.as_str(),
workspace_member_directory
)));
}
// Copy the Cargo.toml files into the temp directory to match the directory structure on disk
let path_diff = diff_paths(
&workspace_member_directory,
&no_deps_metadata.workspace_root,
)
.ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in path diff."))?;
let new_path = temp_dir.join(diff);
fs::create_dir_all(&new_path)?;
fs::copy(
workspace_member_directory.join("Cargo.toml"),
new_path.join("Cargo.toml"),
)?;
// Additionally, symlink everything in some common source directories to ensure specified
// library targets can be relied on and won't prevent fetching metadata
for dir in vec!["bin", "src"].iter() {
let glob_pattern = format!("{}/**/*.rs", workspace_member_directory.join(dir));
for entry in glob(glob_pattern.as_str()).expect("Failed to read glob pattern") {
let path = Utf8PathBuf::from_path_buf(entry?)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory."))?;
// Determine the difference between the workspace root and the current file
let path_diff = diff_paths(&path, &no_deps_metadata.workspace_root).ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory path diff."))?;
// Create a matching directory tree for the current file within the temp workspace
let new_path = temp_dir.join(diff.as_path());
if let Some(parent) = new_path.parent() {
fs::create_dir_all(parent)?;
}
make_symlink(&path, &new_path)?;
}
}
}
Ok(())
}
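    // Added sketch of the re-rooting step above (assuming `diff_paths` comes
    // from the `pathdiff` crate, as used throughout this file): a member path
    // is made relative to the workspace root, then joined onto the temp dir
    // so the copied tree mirrors the on-disk layout.
    #[allow(dead_code)]
    fn reroot_sketch() -> std::path::PathBuf {
        let rel = diff_paths("/ws/member/src", "/ws").expect("member under root"); // "member/src"
        std::path::Path::new("/tmp/raze").join(rel) // "/tmp/raze/member/src"
    }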
/// Creates a copy workspace in a temporary directory for fetching the metadata of the current workspace
fn make_temp_workspace(&self, cargo_workspace_root: &Utf8Path) -> Result<(TempDir, Utf8PathBuf)> {
let temp_dir = TempDir::new()?;
// First gather metadata without downloading any dependencies so we can identify any path dependencies.
let no_deps_metadata = self
.metadata_fetcher
.fetch_metadata(cargo_workspace_root, /*include_deps=*/ false)?;
// There should be a `Cargo.toml` file in the workspace root
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.toml"),
temp_dir.as_ref().join("Cargo.toml"),
)?;
// Optionally copy over the lock file
if no_deps_metadata.workspace_root.join("Cargo.lock").exists() {
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.lock"),
temp_dir.as_ref().join("Cargo.lock"),
)?;
}
let source_dotcargo = cargo_workspace_root.join(".cargo");
let source_dotcargo_config = source_dotcargo.join("config.toml");
if source_dotcargo_config.exists() {
let destination_dotcargo = temp_dir.path().join(".cargo");
fs::create_dir(&destination_dotcargo)?;
let destination_dotcargo_config = destination_dotcargo.join("config.toml");
fs::copy(&source_dotcargo_config, &destination_dotcargo_config)?;
}
// Copy over the Cargo.toml files of each workspace member
let temp_path = Utf8Path::from_path(temp_dir.as_ref())
.ok_or_else(|| anyhow!("Invalid UTF-8 in temp path."))?;
self.link_src_to_workspace(&no_deps_metadata, temp_path)?;
Ok((temp_dir, no_deps_metadata.workspace_root))
}
/// Download a crate's source code from the current registry url
fn fetch_crate_src(&self, dir: &Utf8Path, name: &str, version: &str) -> Result<Utf8PathBuf> {
// The registry url should only be the host URL with ports. No path
let registry_url = {
let mut r_url = self.registry_url.clone();
r_url.set_path("");
r_url.to_string()
};
// Generate a URL with no path. This allows the path to keep any port information
// associated with it.
let mut url = url::Url::parse(®istry_url)?;
url.set_path("");
log::debug!("Cloning binary dependency: {}", &name);
let mut cloner = cargo_clone::Cloner::new();
cloner
.set_registry_url(url.to_string().trim_end_matches('/'))
.set_out_dir(dir);
cloner.clone(
cargo_clone::CloneMethodKind::Crate,
name,
Some(version),
&Vec::new(),
)?;
let crate_dir = dir.join(package_ident(name, version));
    if !crate_dir.exists() {
return Err(anyhow!("Directory does not exist"));
}
Ok(crate_dir)
}
/// Add binary dependencies as workspace members to the given workspace root Cargo.toml file
fn inject_binaries_into_workspace(
&self,
binary_deps: Vec<String>,
root_toml: &Utf8Path,
) -> Result<()> {
// Read the current manifest
let mut manifest = {
let content = fs::read_to_string(root_toml)?;
cargo_toml::Manifest::from_str(content.as_str())?
};
// Parse the current `workspace` section of the manifest if one exists
let mut workspace = match manifest.workspace {
Some(workspace) => workspace,
None => cargo_toml::Workspace::default(),
};
// Add the binary dependencies as workspace members to the `workspace` metadata
for dep in binary_deps.iter() {
workspace.members.push(dep.to_string());
}
// Replace the workspace metadata with the modified metadata
manifest.workspace = Some(workspace);
// Write the metadata back to disk.
    // cargo_toml::Manifest cannot be serialized directly.
// see: https://gitlab.com/crates.rs/cargo_toml/-/issues/3
let value = toml::Value::try_from(&manifest)?;
std::fs::write(root_toml, toml::to_string(&value)?)
.with_context(|| format!("Failed to inject workspace metadata to {}", root_toml))
}
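    // Added sketch (illustration only) of the serialization workaround noted
    // above: the manifest is converted to a generic `toml::Value` first and
    // then rendered, because `cargo_toml::Manifest` cannot be serialized
    // directly.
    #[allow(dead_code)]
    fn manifest_to_string_sketch(manifest: &cargo_toml::Manifest) -> Result<String> {
        let value = toml::Value::try_from(manifest)?;
        Ok(toml::to_string(&value)?)
    }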
  /// Look up a crate in a specified crate index to determine its checksum
fn fetch_crate_checksum(&self, name: &str, version: &str) -> Result<String> {
let index_url_is_file = self.index_url.scheme().to_lowercase() == "file";
    let crate_index_path = if !index_url_is_file {
crates_index::BareIndex::from_url(self.index_url.as_ref())?
.open_or_clone()?
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
} else {
crates_index::Index::new(self.index_url.path())
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
};
let (_index, crate_version) = crate_index_path
.versions()
.iter()
.enumerate()
.find(|(_, ver)| ver.version() == version)
.ok_or_else(|| anyhow!("Failed to find version {} for crate {}", version, name))?;
Ok(crate_version.checksum()[..].to_hex())
}
/// Ensures a lockfile is generated for a crate on disk
///
/// Args:
/// - reused_lockfile: An optional lockfile to use for fetching metadata to
/// ensure subsequent metadata fetches return consistent results.
/// - cargo_dir: The directory of the cargo workspace to gather metadata for.
/// Returns:
/// If a new lockfile was generated via the `lockfile_generator`, that
/// Lockfile object is returned. New lockfiles are generated when
/// `reused_lockfile` is not provided.
fn cargo_generate_lockfile(
&self,
reused_lockfile: &Option<Utf8PathBuf>,
cargo_dir: &Utf8Path,
) -> Result<Option<Lockfile>> {
let lockfile_path = cargo_dir.join("Cargo.lock");
// Use the reusable lockfile if one is provided
if let Some(reused_lockfile) = reused_lockfile {
fs::copy(reused_lockfile, &lockfile_path)?;
return Ok(None);
}
let lockfile = self.lockfile_generator.generate_lockfile(cargo_dir)?;
// Returning the lockfile here signifies that a new lockfile has been created.
Ok(Some(lockfile))
}
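    // Added summary of the branch above:
    // - `reused_lockfile = Some(path)`: the file is copied into the workspace
    //   and `Ok(None)` signals that no new lockfile was generated.
    // - `reused_lockfile = None`: the generator runs and `Ok(Some(lockfile))`
    //   hands back the freshly generated contents.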
/// Gather all information about a Cargo project to use for planning and rendering steps
pub fn fetch_metadata(
&self,
cargo_workspace_root: &Utf8Path,
binary_dep_info: Option<&HashMap<String, cargo_toml::Dependency>>,
reused_lockfile: Option<Utf8PathBuf>,
) -> Result<RazeMetadata> {
let (cargo_dir, cargo_workspace_root) = self.make_temp_workspace(cargo_workspace_root)?;
let utf8_cargo_dir = Utf8Path::from_path(cargo_dir.as_ref())
.ok_or_else(|| anyhow!("Cargo dir has invalid UTF-8 in fetch_metadata."))?;
let cargo_root_toml = utf8_cargo_dir.join("Cargo.toml");
// Gather new lockfile data if any binary dependencies were provided
let mut checksums: HashMap<String, String> = HashMap::new();
if let Some(binary_dep_info) = binary_dep_info {
      if !binary_dep_info.is_empty() {
let mut src_dirnames: Vec<String> = Vec::new();
for (name, info) in binary_dep_info.iter() {
let version = info.req();
let src_dir = self.fetch_crate_src(utf8_cargo_dir, name, version)?;
checksums.insert(
package_ident(name, version),
self.fetch_crate_checksum(name, version)?,
);
if let Some(dirname) = src_dir.file_name() {
src_dirnames.push(dirname.to_string());
}
}
self.inject_binaries_into_workspace(src_dirnames, &cargo_root_toml)?;
}
}
let output_lockfile = self.cargo_generate_lockfile(&reused_lockfile, utf8_cargo_dir)?;
// Load checksums from the lockfile
let workspace_toml_lock = cargo_dir.as_ref().join("Cargo.lock");
if workspace_toml_lock.exists() {
let lockfile = Lockfile::load(workspace_toml_lock)?;
for package in &lockfile.packages {
if let Some(checksum) = &package.checksum |
}
}
let metadata = self
.metadata_fetcher
.fetch_metadata(utf8_cargo_dir, /*include_deps=*/ true)?;
    // Computed in this function because it's metadata, even though it's not returned by `cargo-metadata`
let platform_features = match self.settings.as_ref() {
Some(settings) => get_per_platform_features(cargo_dir.path(), settings, &metadata.packages)?,
None => BTreeMap::new(),
};
Ok(RazeMetadata {
metadata,
checksums,
cargo_workspace_root,
lockfile: output_lockfile,
features: platform_features,
})
}
}
impl Default for RazeMetadataFetcher {
fn default() -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
None,
)
}
}
/// A struct containing information about a binary dependency
pub struct BinaryDependencyInfo {
pub name: String,
pub info: cargo_toml::Dependency,
pub lockfile: Option<Utf8PathBuf>,
}
#[cfg(test)]
pub mod tests {
use anyhow::Context;
use camino::Utf8PathBuf;
use httpmock::MockServer;
use tera::Tera;
use super::*;
use crate::testing::*;
use std::{fs::File, io::Write, str::FromStr};
pub struct DummyCargoMetadataFetcher {
pub metadata_template: Option<String>,
}
impl DummyCargoMetadataFetcher {
fn render_metadata(&self, mock_workspace_path: &Utf8Path) -> Option<Metadata> {
self.metadata_template.as_ref()?;
let dir = TempDir::new().unwrap();
let mut renderer = Tera::new(&format!("{}/*", dir.as_ref().display())).unwrap();
let templates_dir = Utf8PathBuf::from(std::file!())
.parent()
.unwrap()
.join("testing/metadata_templates")
.canonicalize()
.unwrap();
renderer
.add_raw_templates(vec![(
self.metadata_template.as_ref().unwrap(),
fs::read_to_string(templates_dir.join(self.metadata_template.as_ref().unwrap())).unwrap(),
)])
.unwrap();
let mut context = tera::Context::new();
context.insert("mock_workspace", &mock_workspace_path);
context.insert("crate_index_root", "/some/fake/home/path/.cargo");
let content = renderer
.render(self.metadata_template.as_ref().unwrap(), &context)
.unwrap();
Some(serde_json::from_str::<Metadata>(&content).unwrap())
}
}
impl MetadataFetcher for DummyCargoMetadataFetcher {
fn fetch_metadata(&self, working_dir: &Utf8Path, include_deps: bool) -> Result<Metadata> {
// Only use the template if the command is looking to reach out to the internet.
if include_deps {
if let Some(metadata) = self.render_metadata(working_dir) {
return Ok(metadata);
}
}
      // Ensure the command is run in `offline` mode and no dependencies are checked.
MetadataCommand::new()
.cargo_path(cargo_bin_path())
.no_deps()
.current_dir(working_dir)
.other_options(vec!["--offline".to_string()])
.exec()
.with_context(|| {
format!(
"Failed to run `{} metadata` with contents:\n{}",
cargo_bin_path(),
fs::read_to_string(working_dir.join("Cargo.toml")).unwrap()
)
})
}
}
pub struct DummyLockfileGenerator {
// Optional lockfile to use for generation
pub lockfile_contents: Option<String>,
}
impl LockfileGenerator for DummyLockfileGenerator {
fn generate_lockfile(&self, _crate_root_dir: &Utf8Path) -> Result<Lockfile> {
match &self.lockfile_contents {
Some(contents) => Lockfile::from_str(contents)
.with_context(|| format!("Failed to load provided lockfile:\n{}", contents)),
None => Lockfile::from_str(basic_lock_contents())
.with_context(|| format!("Failed to load dummy lockfile:\n{}", basic_lock_contents())),
}
}
}
pub fn dummy_raze_metadata_fetcher() -> (RazeMetadataFetcher, MockServer, TempDir) {
let tempdir = TempDir::new().unwrap();
let mock_server = MockServer::start();
let mut fetcher = RazeMetadataFetcher::new(
cargo_bin_path(),
Url::parse(&mock_server.base_url()).unwrap(),
Url::parse(&format!("file://{}", tempdir.as_ref().display())).unwrap(),
None,
);
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: None,
}));
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
(fetcher, mock_server, tempdir)
}
pub fn dummy_raze_metadata() -> RazeMetadata {
let dir = make_basic_workspace();
let (mut fetcher, _server, _index_dir) = dummy_raze_metadata_fetcher();
// Always render basic metadata
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: Some(templates::BASIC_METADATA.to_string()),
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap()
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new().unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
let mut fetcher = RazeMetadataFetcher::new_with_settings(None);
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
}
// Create Cargo.lock
{
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock_contents().as_bytes()).unwrap();
}
let mut fetcher = RazeMetadataFetcher::default();
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(b"hello").unwrap();
}
let fetcher = RazeMetadataFetcher::default();
assert!( | {
checksums.insert(
package_ident(package.name.as_ref(), &package.version.to_string()),
checksum.to_string(),
);
} | conditional_block |
metadata.rs |
}
impl MetadataFetcher for CargoMetadataFetcher {
fn fetch_metadata(&self, working_dir: &Utf8Path, include_deps: bool) -> Result<Metadata> {
let mut command = MetadataCommand::new();
    if !include_deps {
command.no_deps();
}
command
.cargo_path(&self.cargo_bin_path)
.current_dir(working_dir)
.exec()
.with_context(|| {
format!(
"Failed to fetch Metadata with `{}` from `{}`",
&self.cargo_bin_path, working_dir
)
})
}
}
/// An entity that can generate a lockfile data within a Cargo workspace
pub trait LockfileGenerator {
fn generate_lockfile(&self, crate_root_dir: &Utf8Path) -> Result<Lockfile>;
}
/// A lockfile generator which simply wraps the `cargo generate-lockfile` command
struct CargoLockfileGenerator {
cargo_bin_path: Utf8PathBuf,
}
impl LockfileGenerator for CargoLockfileGenerator {
/// Generate lockfile information from a cargo workspace root
fn generate_lockfile(&self, crate_root_dir: &Utf8Path) -> Result<Lockfile> {
let lockfile_path = crate_root_dir.join("Cargo.lock");
// Generate lockfile
let output = std::process::Command::new(&self.cargo_bin_path)
.arg("generate-lockfile")
.current_dir(crate_root_dir)
.output()
.with_context(|| format!("Generating lockfile in {}", crate_root_dir))?;
    if !output.status.success() {
anyhow::bail!(
"Failed to generate lockfile in {}: {}",
crate_root_dir,
String::from_utf8_lossy(&output.stderr)
);
}
// Load lockfile contents
Lockfile::load(&lockfile_path)
.with_context(|| format!("Failed to load lockfile: {}", lockfile_path))
}
}
/// A struct containing all metadata about a project with which to plan generated output files.
#[derive(Debug, Clone)]
pub struct RazeMetadata {
// `cargo metadata` output of the current project
pub metadata: Metadata,
// The absolute path to the current project's cargo workspace root. Note that the workspace
// root in `metadata` will be inside of a temporary directory. For details see:
// https://doc.rust-lang.org/cargo/reference/workspaces.html#root-package
pub cargo_workspace_root: Utf8PathBuf,
// The metadata of a lockfile that was generated as a result of fetching metadata
pub lockfile: Option<Lockfile>,
// A map of all known crates with checksums. Use `checksums_for` to access data from this map.
pub checksums: HashMap<String, String>,
// A map of crates to their enabled general and per-platform features.
pub features: BTreeMap<PackageId, Features>,
}
impl RazeMetadata {
/// Get the checksum of a crate using a unique formatter.
pub fn checksum_for(&self, name: &str, version: &str) -> Option<&String> {
self.checksums.get(&package_ident(name, version))
}
}
/// Create a symlink file on unix systems
#[cfg(target_family = "unix")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::unix::fs::symlink(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
/// Create a symlink file on windows systems
#[cfg(target_family = "windows")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::windows::fs::symlink_file(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
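// Added note: exactly one `make_symlink` variant is compiled on any given
// platform via `#[cfg(target_family = ...)]`, so call sites stay
// platform-agnostic.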
/// A workspace metadata fetcher that uses the Cargo commands to gather information about a Cargo
/// project and its transitive dependencies for planning and rendering of Bazel BUILD files.
pub struct RazeMetadataFetcher {
registry_url: Url,
index_url: Url,
metadata_fetcher: Box<dyn MetadataFetcher>,
lockfile_generator: Box<dyn LockfileGenerator>,
settings: Option<RazeSettings>,
}
impl RazeMetadataFetcher {
pub fn new<P: Into<Utf8PathBuf>>(
cargo_bin_path: P,
registry_url: Url,
index_url: Url,
settings: Option<RazeSettings>,
) -> RazeMetadataFetcher {
let cargo_bin_pathbuf: Utf8PathBuf = cargo_bin_path.into();
RazeMetadataFetcher {
registry_url,
index_url,
metadata_fetcher: Box::new(CargoMetadataFetcher {
cargo_bin_path: cargo_bin_pathbuf.clone(),
}),
lockfile_generator: Box::new(CargoLockfileGenerator {
cargo_bin_path: cargo_bin_pathbuf,
}),
settings,
}
}
pub fn new_with_settings(settings: Option<RazeSettings>) -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
settings,
)
}
/// Reassign the [`crate::metadata::MetadataFetcher`] associated with the Raze Metadata Fetcher
pub fn set_metadata_fetcher(&mut self, fetcher: Box<dyn MetadataFetcher>) {
self.metadata_fetcher = fetcher;
}
/// Reassign the [`crate::metadata::LockfileGenerator`] associated with the current Fetcher
pub fn set_lockfile_generator(&mut self, generator: Box<dyn LockfileGenerator>) {
self.lockfile_generator = generator;
}
/// Symlinks the source code of all workspace members into the temp workspace
fn link_src_to_workspace(&self, no_deps_metadata: &Metadata, temp_dir: &Utf8Path) -> Result<()> {
let crate_member_id_re = match consts::OS {
"windows" => Regex::new(r".+\(path\+file:///(.+)\)")?,
_ => Regex::new(r".+\(path\+file://(.+)\)")?,
};
for member in no_deps_metadata.workspace_members.iter() {
// Get a path to the workspace member directory
let workspace_member_directory = {
let crate_member_id_match = crate_member_id_re
.captures(&member.repr)
.and_then(|cap| cap.get(1));
if crate_member_id_match.is_none() {
continue;
}
// UNWRAP: guarded above
Utf8PathBuf::from(crate_member_id_match.unwrap().as_str())
};
// Sanity check: The assumption is that any crate with an `id` that matches
// the regex pattern above should contain a Cargo.toml file with which we
// can use to infer the existence of libraries from relative paths such as
// `src/lib.rs` and `src/main.rs`.
let toml_path = workspace_member_directory.join("Cargo.toml");
      if !toml_path.exists() {
return Err(anyhow!(format!(
"The regex pattern `{}` found a path that did not contain a Cargo.toml file: `{}`",
crate_member_id_re.as_str(),
workspace_member_directory
)));
}
// Copy the Cargo.toml files into the temp directory to match the directory structure on disk
let path_diff = diff_paths(
&workspace_member_directory,
&no_deps_metadata.workspace_root,
)
.ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in path diff."))?;
let new_path = temp_dir.join(diff);
fs::create_dir_all(&new_path)?;
fs::copy(
workspace_member_directory.join("Cargo.toml"),
new_path.join("Cargo.toml"),
)?;
// Additionally, symlink everything in some common source directories to ensure specified
// library targets can be relied on and won't prevent fetching metadata
for dir in vec!["bin", "src"].iter() {
let glob_pattern = format!("{}/**/*.rs", workspace_member_directory.join(dir));
for entry in glob(glob_pattern.as_str()).expect("Failed to read glob pattern") {
let path = Utf8PathBuf::from_path_buf(entry?)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory."))?;
// Determine the difference between the workspace root and the current file
let path_diff = diff_paths(&path, &no_deps_metadata.workspace_root).ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory path diff."))?;
// Create a matching directory tree for the current file within the temp workspace
let new_path = temp_dir.join(diff.as_path());
if let Some(parent) = new_path.parent() {
fs::create_dir_all(parent)?;
}
make_symlink(&path, &new_path)?;
}
}
}
Ok(())
}
/// Creates a copy workspace in a temporary directory for fetching the metadata of the current workspace
fn make_temp_workspace(&self, cargo_workspace_root: &Utf8Path) -> Result<(TempDir, Utf8PathBuf)> {
let temp_dir = TempDir::new()?;
// First gather metadata without downloading any dependencies so we can identify any path dependencies.
let no_deps_metadata = self
.metadata_fetcher
.fetch_metadata(cargo_workspace_root, /*include_deps=*/ false)?;
// There should be a `Cargo.toml` file in the workspace root
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.toml"),
temp_dir.as_ref().join("Cargo.toml"),
)?;
// Optionally copy over the lock file
if no_deps_metadata.workspace_root.join("Cargo.lock").exists() {
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.lock"),
temp_dir.as_ref().join("Cargo.lock"),
)?;
}
let source_dotcargo = cargo_workspace_root.join(".cargo");
let source_dotcargo_config = source_dotcargo.join("config.toml");
if source_dotcargo_config.exists() {
let destination_dotcargo = temp_dir.path().join(".cargo");
fs::create_dir(&destination_dotcargo)?;
let destination_dotcargo_config = destination_dotcargo.join("config.toml");
fs::copy(&source_dotcargo_config, &destination_dotcargo_config)?;
}
// Copy over the Cargo.toml files of each workspace member
let temp_path = Utf8Path::from_path(temp_dir.as_ref())
.ok_or_else(|| anyhow!("Invalid UTF-8 in temp path."))?;
self.link_src_to_workspace(&no_deps_metadata, temp_path)?;
Ok((temp_dir, no_deps_metadata.workspace_root))
}
/// Download a crate's source code from the current registry url
fn fetch_crate_src(&self, dir: &Utf8Path, name: &str, version: &str) -> Result<Utf8PathBuf> {
// The registry url should only be the host URL with ports. No path
let registry_url = {
let mut r_url = self.registry_url.clone();
r_url.set_path("");
r_url.to_string()
};
// Generate a URL with no path. This allows the path to keep any port information
// associated with it.
let mut url = url::Url::parse(®istry_url)?;
url.set_path("");
log::debug!("Cloning binary dependency: {}", &name);
let mut cloner = cargo_clone::Cloner::new();
cloner
.set_registry_url(url.to_string().trim_end_matches('/'))
.set_out_dir(dir);
cloner.clone(
cargo_clone::CloneMethodKind::Crate,
name,
Some(version),
&Vec::new(),
)?;
let crate_dir = dir.join(package_ident(name, version));
    if !crate_dir.exists() {
return Err(anyhow!("Directory does not exist"));
}
Ok(crate_dir)
}
/// Add binary dependencies as workspace members to the given workspace root Cargo.toml file
fn inject_binaries_into_workspace(
&self,
binary_deps: Vec<String>,
root_toml: &Utf8Path,
) -> Result<()> {
// Read the current manifest
let mut manifest = {
let content = fs::read_to_string(root_toml)?;
cargo_toml::Manifest::from_str(content.as_str())?
};
// Parse the current `workspace` section of the manifest if one exists
let mut workspace = match manifest.workspace {
Some(workspace) => workspace,
None => cargo_toml::Workspace::default(),
};
// Add the binary dependencies as workspace members to the `workspace` metadata
for dep in binary_deps.iter() {
workspace.members.push(dep.to_string());
}
// Replace the workspace metadata with the modified metadata
manifest.workspace = Some(workspace);
// Write the metadata back to disk.
    // cargo_toml::Manifest cannot be serialized directly.
// see: https://gitlab.com/crates.rs/cargo_toml/-/issues/3
let value = toml::Value::try_from(&manifest)?;
std::fs::write(root_toml, toml::to_string(&value)?)
.with_context(|| format!("Failed to inject workspace metadata to {}", root_toml))
}
  /// Look up a crate in a specified crate index to determine its checksum
fn fetch_crate_checksum(&self, name: &str, version: &str) -> Result<String> {
let index_url_is_file = self.index_url.scheme().to_lowercase() == "file";
    let crate_index_path = if !index_url_is_file {
crates_index::BareIndex::from_url(self.index_url.as_ref())?
.open_or_clone()?
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
} else {
crates_index::Index::new(self.index_url.path())
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
};
let (_index, crate_version) = crate_index_path
.versions()
.iter()
.enumerate()
.find(|(_, ver)| ver.version() == version)
.ok_or_else(|| anyhow!("Failed to find version {} for crate {}", version, name))?;
Ok(crate_version.checksum()[..].to_hex())
}
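    // Added note (hedged): `checksum()` yields the raw SHA-256 digest bytes
    // from the index entry, and `.to_hex()` (presumably a ToHex-style
    // extension trait imported earlier in this file) renders the 64-character
    // lowercase hex string that ends up in `RazeMetadata::checksums`.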
/// Ensures a lockfile is generated for a crate on disk
///
/// Args:
/// - reused_lockfile: An optional lockfile to use for fetching metadata to
/// ensure subsequent metadata fetches return consistent results.
/// - cargo_dir: The directory of the cargo workspace to gather metadata for.
/// Returns:
/// If a new lockfile was generated via the `lockfile_generator`, that
/// Lockfile object is returned. New lockfiles are generated when
/// `reused_lockfile` is not provided.
fn cargo_generate_lockfile(
&self,
reused_lockfile: &Option<Utf8PathBuf>,
cargo_dir: &Utf8Path,
) -> Result<Option<Lockfile>> {
let lockfile_path = cargo_dir.join("Cargo.lock");
// Use the reusable lockfile if one is provided
if let Some(reused_lockfile) = reused_lockfile {
fs::copy(reused_lockfile, &lockfile_path)?;
return Ok(None);
}
let lockfile = self.lockfile_generator.generate_lockfile(cargo_dir)?;
// Returning the lockfile here signifies that a new lockfile has been created.
Ok(Some(lockfile))
}
/// Gather all information about a Cargo project to use for planning and rendering steps
pub fn fetch_metadata(
&self,
cargo_workspace_root: &Utf8Path,
binary_dep_info: Option<&HashMap<String, cargo_toml::Dependency>>,
reused_lockfile: Option<Utf8PathBuf>,
) -> Result<RazeMetadata> {
let (cargo_dir, cargo_workspace_root) = self.make_temp_workspace(cargo_workspace_root)?;
let utf8_cargo_dir = Utf8Path::from_path(cargo_dir.as_ref())
.ok_or_else(|| anyhow!("Cargo dir has invalid UTF-8 in fetch_metadata."))?;
let cargo_root_toml = utf8_cargo_dir.join("Cargo.toml");
// Gather new lockfile data if any binary dependencies were provided
let mut checksums: HashMap<String, String> = HashMap::new();
if let Some(binary_dep_info) = binary_dep_info {
      if !binary_dep_info.is_empty() {
let mut src_dirnames: Vec<String> = Vec::new();
for (name, info) in binary_dep_info.iter() {
let version = info.req();
let src_dir = self.fetch_crate_src(utf8_cargo_dir, name, version)?;
checksums.insert(
package_ident(name, version),
self.fetch_crate_checksum(name, version)?,
);
if let Some(dirname) = src_dir.file_name() {
src_dirnames.push(dirname.to_string());
}
}
self.inject_binaries_into_workspace(src_dirnames, &cargo_root_toml)?;
}
}
let output_lockfile = self.cargo_generate_lockfile(&reused_lockfile, utf8_cargo_dir)?;
// Load checksums from the lockfile
let workspace_toml_lock = cargo_dir.as_ref().join("Cargo.lock");
if workspace_toml_lock.exists() {
let lockfile = Lockfile::load(workspace_toml_lock)?;
for package in &lockfile.packages {
if let Some(checksum) = &package.checksum {
checksums.insert(
package_ident(package.name.as_ref(), &package.version.to_string()),
checksum.to_string(),
);
}
}
}
let metadata = self
.metadata_fetcher
.fetch_metadata(utf8_cargo_dir, /*include_deps=*/ true)?;
// Computed in this function because it is metadata, even though it is not returned by `cargo-metadata`
let platform_features = match self.settings.as_ref() {
Some(settings) => get_per_platform_features(cargo_dir.path(), settings, &metadata.packages)?,
None => BTreeMap::new(),
};
Ok(RazeMetadata {
metadata,
checksums,
cargo_workspace_root,
lockfile: output_lockfile,
features: platform_features,
})
}
}
impl Default for RazeMetadataFetcher {
fn default() -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
None,
)
}
}
/// A struct containing information about a binary dependency
pub struct BinaryDependencyInfo {
pub name: String,
pub info: cargo_toml::Dependency,
pub lockfile: Option<Utf8PathBuf>,
}
#[cfg(test)]
pub mod tests {
use anyhow::Context;
use camino::Utf8PathBuf;
use httpmock::MockServer;
use tera::Tera;
use super::*;
use crate::testing::*;
use std::{fs::File, io::Write, str::FromStr};
pub struct DummyCargoMetadataFetcher {
pub metadata_template: Option<String>,
}
impl DummyCargoMetadataFetcher {
fn render_metadata(&self, mock_workspace_path: &Utf8Path) -> Option<Metadata> {
self.metadata_template.as_ref()?;
let dir = TempDir::new().unwrap();
let mut renderer = Tera::new(&format!("{}/*", dir.as_ref().display())).unwrap();
let templates_dir = Utf8PathBuf::from(std::file!())
.parent()
.unwrap()
.join("testing/metadata_templates")
.canonicalize()
.unwrap();
renderer
.add_raw_templates(vec![(
self.metadata_template.as_ref().unwrap(),
fs::read_to_string(templates_dir.join(self.metadata_template.as_ref().unwrap())).unwrap(),
)])
.unwrap();
let mut context = tera::Context::new();
context.insert("mock_workspace", &mock_workspace_path);
context.insert("crate_index_root", "/some/fake/home/path/.cargo");
let content = renderer
.render(self.metadata_template.as_ref().unwrap(), &context)
.unwrap();
Some(serde_json::from_str::<Metadata>(&content).unwrap())
}
}
impl MetadataFetcher for DummyCargoMetadataFetcher {
fn fetch_metadata(&self, working_dir: &Utf8Path, include_deps: bool) -> Result<Metadata> {
// Only use the template if the command is looking to reach out to the internet.
if include_deps {
if let Some(metadata) = self.render_metadata(working_dir) {
return Ok(metadata);
}
}
// Ensure the command is run in `offline` mode and no dependencies are checked.
MetadataCommand::new()
.cargo_path(cargo_bin_path())
.no_deps()
.current_dir(working_dir)
.other_options(vec!["--offline".to_string()])
.exec()
.with_context(|| {
format!(
"Failed to run `{} metadata` with contents:\n{}",
cargo_bin_path(),
fs::read_to_string(working_dir.join("Cargo.toml")).unwrap()
)
})
}
}
pub struct DummyLockfileGenerator {
// Optional lockfile to use for generation
pub lockfile_contents: Option<String>,
}
impl LockfileGenerator for DummyLockfileGenerator {
fn generate_lockfile(&self, _crate_root_dir: &Utf8Path) -> Result<Lockfile> {
match &self.lockfile_contents {
Some(contents) => Lockfile::from_str(contents)
.with_context(|| format!("Failed to load provided lockfile:\n{}", contents)),
None => Lockfile::from_str(basic_lock_contents())
.with_context(|| format!("Failed to load dummy lockfile:\n{}", basic_lock_contents())),
}
}
}
pub fn dummy_raze_metadata_fetcher() -> (RazeMetadataFetcher, MockServer, TempDir) {
let tempdir = TempDir::new().unwrap();
let mock_server = MockServer::start();
let mut fetcher = RazeMetadataFetcher::new(
cargo_bin_path(),
Url::parse(&mock_server.base_url()).unwrap(),
Url::parse(&format!("file://{}", tempdir.as_ref().display())).unwrap(),
None,
);
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: None,
}));
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
(fetcher, mock_server, tempdir)
}
pub fn dummy_raze_metadata() -> RazeMetadata {
let dir = make_basic_workspace();
let (mut fetcher, _server, _index_dir) = dummy_raze_metadata_fetcher();
// Always render basic metadata
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: Some(templates::BASIC_METADATA.to_string()),
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap()
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new().unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
let mut fetcher = RazeMetadataFetcher::new_with_settings(None);
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
| {
CargoMetadataFetcher {
cargo_bin_path: cargo_bin_path(),
}
} | identifier_body |
|
metadata.rs | did not contain a Cargo.toml file: `{}`",
crate_member_id_re.as_str(),
workspace_member_directory
)));
}
// Copy the Cargo.toml files into the temp directory to match the directory structure on disk
let path_diff = diff_paths(
&workspace_member_directory,
&no_deps_metadata.workspace_root,
)
.ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in path diff."))?;
let new_path = temp_dir.join(diff);
fs::create_dir_all(&new_path)?;
fs::copy(
workspace_member_directory.join("Cargo.toml"),
new_path.join("Cargo.toml"),
)?;
// Additionally, symlink everything in some common source directories to ensure specified
// library targets can be relied on and won't prevent fetching metadata
for dir in vec!["bin", "src"].iter() {
let glob_pattern = format!("{}/**/*.rs", workspace_member_directory.join(dir));
for entry in glob(glob_pattern.as_str()).expect("Failed to read glob pattern") {
let path = Utf8PathBuf::from_path_buf(entry?)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory."))?;
// Determine the difference between the workspace root and the current file
let path_diff = diff_paths(&path, &no_deps_metadata.workspace_root).ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory path diff."))?;
// Create a matching directory tree for the current file within the temp workspace
let new_path = temp_dir.join(diff.as_path());
if let Some(parent) = new_path.parent() {
fs::create_dir_all(parent)?;
}
make_symlink(&path, &new_path)?;
}
}
}
Ok(())
}
/// Creates a copy workspace in a temporary directory for fetching the metadata of the current workspace
fn make_temp_workspace(&self, cargo_workspace_root: &Utf8Path) -> Result<(TempDir, Utf8PathBuf)> {
let temp_dir = TempDir::new()?;
// First gather metadata without downloading any dependencies so we can identify any path dependencies.
let no_deps_metadata = self
.metadata_fetcher
.fetch_metadata(cargo_workspace_root, /*include_deps=*/ false)?;
// There should be a `Cargo.toml` file in the workspace root
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.toml"),
temp_dir.as_ref().join("Cargo.toml"),
)?;
// Optionally copy over the lock file
if no_deps_metadata.workspace_root.join("Cargo.lock").exists() {
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.lock"),
temp_dir.as_ref().join("Cargo.lock"),
)?;
}
let source_dotcargo = cargo_workspace_root.join(".cargo");
let source_dotcargo_config = source_dotcargo.join("config.toml");
if source_dotcargo_config.exists() {
let destination_dotcargo = temp_dir.path().join(".cargo");
fs::create_dir(&destination_dotcargo)?;
let destination_dotcargo_config = destination_dotcargo.join("config.toml");
fs::copy(&source_dotcargo_config, &destination_dotcargo_config)?;
}
// Copy over the Cargo.toml files of each workspace member
let temp_path = Utf8Path::from_path(temp_dir.as_ref())
.ok_or_else(|| anyhow!("Invalid UTF-8 in temp path."))?;
self.link_src_to_workspace(&no_deps_metadata, temp_path)?;
Ok((temp_dir, no_deps_metadata.workspace_root))
}
/// Download a crate's source code from the current registry url
fn fetch_crate_src(&self, dir: &Utf8Path, name: &str, version: &str) -> Result<Utf8PathBuf> {
// The registry url should only be the host URL with ports. No path
let registry_url = {
let mut r_url = self.registry_url.clone();
r_url.set_path("");
r_url.to_string()
};
// Generate a URL with no path. This allows the path to keep any port information
// associated with it.
let mut url = url::Url::parse(&registry_url)?;
url.set_path("");
log::debug!("Cloning binary dependency: {}", &name);
let mut cloner = cargo_clone::Cloner::new();
cloner
.set_registry_url(url.to_string().trim_end_matches('/'))
.set_out_dir(dir);
cloner.clone(
cargo_clone::CloneMethodKind::Crate,
name,
Some(version),
&Vec::new(),
)?;
let crate_dir = dir.join(package_ident(name, version));
if !crate_dir.exists() {
return Err(anyhow!("Directory does not exist"));
}
Ok(crate_dir)
}
/// Add binary dependencies as workspace members to the given workspace root Cargo.toml file
fn inject_binaries_into_workspace(
&self,
binary_deps: Vec<String>,
root_toml: &Utf8Path,
) -> Result<()> {
// Read the current manifest
let mut manifest = {
let content = fs::read_to_string(root_toml)?;
cargo_toml::Manifest::from_str(content.as_str())?
};
// Parse the current `workspace` section of the manifest if one exists
let mut workspace = match manifest.workspace {
Some(workspace) => workspace,
None => cargo_toml::Workspace::default(),
};
// Add the binary dependencies as workspace members to the `workspace` metadata
for dep in binary_deps.iter() {
workspace.members.push(dep.to_string());
}
// Replace the workspace metadata with the modified metadata
manifest.workspace = Some(workspace);
// Write the metadata back to disk.
// cargo_toml::Manifest cannot be serialized directly.
// see: https://gitlab.com/crates.rs/cargo_toml/-/issues/3
let value = toml::Value::try_from(&manifest)?;
std::fs::write(root_toml, toml::to_string(&value)?)
.with_context(|| format!("Failed to inject workspace metadata to {}", root_toml))
}
/// Look up a crate in a specified crate index to determine its checksum
fn fetch_crate_checksum(&self, name: &str, version: &str) -> Result<String> {
let index_url_is_file = self.index_url.scheme().to_lowercase() == "file";
let crate_index_path = if !index_url_is_file {
crates_index::BareIndex::from_url(self.index_url.as_ref())?
.open_or_clone()?
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
} else {
crates_index::Index::new(self.index_url.path())
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
};
let (_index, crate_version) = crate_index_path
.versions()
.iter()
.enumerate()
.find(|(_, ver)| ver.version() == version)
.ok_or_else(|| anyhow!("Failed to find version {} for crate {}", version, name))?;
Ok(crate_version.checksum()[..].to_hex())
}
/// Ensures a lockfile is generated for a crate on disk
///
/// Args:
/// - reused_lockfile: An optional lockfile to use for fetching metadata to
/// ensure subsequent metadata fetches return consistent results.
/// - cargo_dir: The directory of the cargo workspace to gather metadata for.
/// Returns:
/// If a new lockfile was generated via the `lockfile_generator`, that
/// Lockfile object is returned. New lockfiles are generated when
/// `reused_lockfile` is not provided.
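///
/// Example (an illustrative sketch, not from the original source; `fetcher`,
/// `dir`, and `reused` are assumed bindings for a `RazeMetadataFetcher`, a
/// workspace `Utf8Path`, and a reusable lockfile path):
///
/// ```ignore
/// // Without a lockfile to reuse, a fresh one is generated and returned.
/// assert!(fetcher.cargo_generate_lockfile(&None, dir)?.is_some());
/// // With a reused lockfile, it is copied into place and `None` is returned.
/// assert!(fetcher.cargo_generate_lockfile(&Some(reused), dir)?.is_none());
/// ```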
fn cargo_generate_lockfile(
&self,
reused_lockfile: &Option<Utf8PathBuf>,
cargo_dir: &Utf8Path,
) -> Result<Option<Lockfile>> {
let lockfile_path = cargo_dir.join("Cargo.lock");
// Use the reusable lockfile if one is provided
if let Some(reused_lockfile) = reused_lockfile {
fs::copy(reused_lockfile, &lockfile_path)?;
return Ok(None);
}
let lockfile = self.lockfile_generator.generate_lockfile(cargo_dir)?;
// Returning the lockfile here signifies that a new lockfile has been created.
Ok(Some(lockfile))
}
/// Gather all information about a Cargo project to use for planning and rendering steps
pub fn fetch_metadata(
&self,
cargo_workspace_root: &Utf8Path,
binary_dep_info: Option<&HashMap<String, cargo_toml::Dependency>>,
reused_lockfile: Option<Utf8PathBuf>,
) -> Result<RazeMetadata> {
let (cargo_dir, cargo_workspace_root) = self.make_temp_workspace(cargo_workspace_root)?;
let utf8_cargo_dir = Utf8Path::from_path(cargo_dir.as_ref())
.ok_or_else(|| anyhow!("Cargo dir has invalid UTF-8 in fetch_metadata."))?;
let cargo_root_toml = utf8_cargo_dir.join("Cargo.toml");
// Gather new lockfile data if any binary dependencies were provided
let mut checksums: HashMap<String, String> = HashMap::new();
if let Some(binary_dep_info) = binary_dep_info {
if !binary_dep_info.is_empty() {
let mut src_dirnames: Vec<String> = Vec::new();
for (name, info) in binary_dep_info.iter() {
let version = info.req();
let src_dir = self.fetch_crate_src(utf8_cargo_dir, name, version)?;
checksums.insert(
package_ident(name, version),
self.fetch_crate_checksum(name, version)?,
);
if let Some(dirname) = src_dir.file_name() {
src_dirnames.push(dirname.to_string());
}
}
self.inject_binaries_into_workspace(src_dirnames, &cargo_root_toml)?;
}
}
let output_lockfile = self.cargo_generate_lockfile(&reused_lockfile, utf8_cargo_dir)?;
// Load checksums from the lockfile
let workspace_toml_lock = cargo_dir.as_ref().join("Cargo.lock");
if workspace_toml_lock.exists() {
let lockfile = Lockfile::load(workspace_toml_lock)?;
for package in &lockfile.packages {
if let Some(checksum) = &package.checksum {
checksums.insert(
package_ident(package.name.as_ref(), &package.version.to_string()),
checksum.to_string(),
);
}
}
}
let metadata = self
.metadata_fetcher
.fetch_metadata(utf8_cargo_dir, /*include_deps=*/ true)?;
// Computed in this function because it is metadata, even though it is not returned by `cargo-metadata`
let platform_features = match self.settings.as_ref() {
Some(settings) => get_per_platform_features(cargo_dir.path(), settings, &metadata.packages)?,
None => BTreeMap::new(),
};
Ok(RazeMetadata {
metadata,
checksums,
cargo_workspace_root,
lockfile: output_lockfile,
features: platform_features,
})
}
}
impl Default for RazeMetadataFetcher {
fn default() -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
None,
)
}
}
/// A struct containing information about a binary dependency
pub struct BinaryDependencyInfo {
pub name: String,
pub info: cargo_toml::Dependency,
pub lockfile: Option<Utf8PathBuf>,
}
#[cfg(test)]
pub mod tests {
use anyhow::Context;
use camino::Utf8PathBuf;
use httpmock::MockServer;
use tera::Tera;
use super::*;
use crate::testing::*;
use std::{fs::File, io::Write, str::FromStr};
pub struct DummyCargoMetadataFetcher {
pub metadata_template: Option<String>,
}
impl DummyCargoMetadataFetcher {
fn render_metadata(&self, mock_workspace_path: &Utf8Path) -> Option<Metadata> {
self.metadata_template.as_ref()?;
let dir = TempDir::new().unwrap();
let mut renderer = Tera::new(&format!("{}/*", dir.as_ref().display())).unwrap();
let templates_dir = Utf8PathBuf::from(std::file!())
.parent()
.unwrap()
.join("testing/metadata_templates")
.canonicalize()
.unwrap();
renderer
.add_raw_templates(vec![(
self.metadata_template.as_ref().unwrap(),
fs::read_to_string(templates_dir.join(self.metadata_template.as_ref().unwrap())).unwrap(),
)])
.unwrap();
let mut context = tera::Context::new();
context.insert("mock_workspace", &mock_workspace_path);
context.insert("crate_index_root", "/some/fake/home/path/.cargo");
let content = renderer
.render(self.metadata_template.as_ref().unwrap(), &context)
.unwrap();
Some(serde_json::from_str::<Metadata>(&content).unwrap())
}
}
impl MetadataFetcher for DummyCargoMetadataFetcher {
fn fetch_metadata(&self, working_dir: &Utf8Path, include_deps: bool) -> Result<Metadata> {
// Only use the template if the command is looking to reach out to the internet.
if include_deps {
if let Some(metadata) = self.render_metadata(working_dir) {
return Ok(metadata);
}
}
// Ensure the command is run in `offline` mode and no dependencies are checked.
MetadataCommand::new()
.cargo_path(cargo_bin_path())
.no_deps()
.current_dir(working_dir)
.other_options(vec!["--offline".to_string()])
.exec()
.with_context(|| {
format!(
"Failed to run `{} metadata` with contents:\n{}",
cargo_bin_path(),
fs::read_to_string(working_dir.join("Cargo.toml")).unwrap()
)
})
}
}
pub struct DummyLockfileGenerator {
// Optional lockfile to use for generation
pub lockfile_contents: Option<String>,
}
impl LockfileGenerator for DummyLockfileGenerator {
fn generate_lockfile(&self, _crate_root_dir: &Utf8Path) -> Result<Lockfile> {
match &self.lockfile_contents {
Some(contents) => Lockfile::from_str(contents)
.with_context(|| format!("Failed to load provided lockfile:\n{}", contents)),
None => Lockfile::from_str(basic_lock_contents())
.with_context(|| format!("Failed to load dummy lockfile:\n{}", basic_lock_contents())),
}
}
}
pub fn dummy_raze_metadata_fetcher() -> (RazeMetadataFetcher, MockServer, TempDir) {
let tempdir = TempDir::new().unwrap();
let mock_server = MockServer::start();
let mut fetcher = RazeMetadataFetcher::new(
cargo_bin_path(),
Url::parse(&mock_server.base_url()).unwrap(),
Url::parse(&format!("file://{}", tempdir.as_ref().display())).unwrap(),
None,
);
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: None,
}));
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
(fetcher, mock_server, tempdir)
}
pub fn dummy_raze_metadata() -> RazeMetadata {
let dir = make_basic_workspace();
let (mut fetcher, _server, _index_dir) = dummy_raze_metadata_fetcher();
// Always render basic metadata
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: Some(templates::BASIC_METADATA.to_string()),
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap()
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new().unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
let mut fetcher = RazeMetadataFetcher::new_with_settings(None);
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
}
// Create Cargo.lock
{
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock_contents().as_bytes()).unwrap();
}
let mut fetcher = RazeMetadataFetcher::default();
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(b"hello").unwrap();
}
let fetcher = RazeMetadataFetcher::default();
assert!(fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.is_err());
}
#[test]
fn test_fetching_src() {
let (fetcher, mock_server, _index_url) = dummy_raze_metadata_fetcher();
let mock = mock_remote_crate("fake-crate", "3.3.3", &mock_server);
let path = fetcher
.fetch_crate_src(utf8_path(mock.data_dir.as_ref()), "fake-crate", "3.3.3")
.unwrap();
for mock in mock.endpoints.iter() {
mock.assert();
}
assert!(path.exists());
// Ensure the name follows a consistent pattern: `{name}-{version}`
assert_eq!(
mock.data_dir.into_path().join("fake-crate-3.3.3").as_path(),
path.as_path()
);
assert!(path.join("Cargo.toml").exists());
assert!(path.join("Cargo.lock").exists());
assert!(path.join("test").exists());
}
#[test]
fn test_inject_dependency_to_workspace() {
let (fetcher, _mock_server, _index_url) = dummy_raze_metadata_fetcher();
let crate_dir = make_workspace_with_dependency();
let utf8_crate_dir = utf8_path(crate_dir.as_ref());
let cargo_toml_path = utf8_crate_dir.join("Cargo.toml");
let mut manifest =
cargo_toml::Manifest::from_str(fs::read_to_string(&cargo_toml_path).unwrap().as_str())
.unwrap();
let basic_dep_toml = crate_dir.as_ref().join("basic_dep/Cargo.toml");
fs::create_dir_all(basic_dep_toml.parent().unwrap()).unwrap();
fs::write(&basic_dep_toml, named_toml_contents("basic_dep", "0.0.1")).unwrap();
assert!(basic_dep_toml.exists());
manifest.workspace = Some({
let mut workspace = cargo_toml::Workspace::default();
workspace.members.push("test".to_string());
workspace
});
// Ensure the manifest only includes the new workspace member after the injection
assert_ne!(
cargo_toml::Manifest::from_str(fs::read_to_string(&cargo_toml_path).unwrap().as_str())
.unwrap(),
manifest
);
// Inject the binary dependency into the workspace
fetcher
.inject_binaries_into_workspace(vec!["test".to_string()], &cargo_toml_path)
.unwrap();
// Ensure workspace now has the new member
assert_eq!(
cargo_toml::Manifest::from_str(fs::read_to_string(&cargo_toml_path).unwrap().as_str())
.unwrap(),
manifest
);
}
#[test]
fn test_generate_lockfile_use_previously_generated() {
let (fetcher, _mock_server, _index_url) = dummy_raze_metadata_fetcher();
let crate_dir = make_workspace_with_dependency();
let reused_lockfile =
Utf8PathBuf::from_path_buf(crate_dir.as_ref().join("locks_test/Cargo.raze.lock")).unwrap();
fs::create_dir_all(reused_lockfile.parent().unwrap()).unwrap();
fs::write(&reused_lockfile, "# test_generate_lockfile").unwrap();
// A reused lockfile was provided, so no new lockfile should be returned
assert!(fetcher
.cargo_generate_lockfile(
&Some(reused_lockfile.clone()),
utf8_path(crate_dir.as_ref())
)
.unwrap()
.is_none());
// The reused lockfile should have been copied in as the workspace's Cargo.lock
assert_eq!(
cargo_lock::Lockfile::load(crate_dir.as_ref().join("Cargo.lock")).unwrap(),
cargo_lock::Lockfile::load(&reused_lockfile).unwrap(),
);
}
#[test]
fn test_cargo_generate_lockfile_new_file() {
let (mut fetcher, _mock_server, _index_url) = dummy_raze_metadata_fetcher();
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: Some(advanced_lock_contents().to_string()),
}));
let crate_dir = make_workspace(advanced_toml_contents(), None);
// A new lockfile should have been created and it should match the expected contents for the advanced_toml workspace
assert_eq!(
fetcher
.cargo_generate_lockfile(&None, Utf8Path::from_path(crate_dir.as_ref()).unwrap())
.unwrap()
.unwrap(),
Lockfile::from_str(advanced_lock_contents()).unwrap()
);
}
#[test]
fn test_cargo_generate_lockfile_no_file() {
let (mut fetcher, _mock_server, _index_url) = dummy_raze_metadata_fetcher();
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: Some(advanced_lock_contents().to_string()),
}));
let crate_dir = make_workspace(advanced_toml_contents(), None);
let expected_lockfile =
Utf8PathBuf::from_path_buf(crate_dir.as_ref().join("expected/Cargo.expected.lock")).unwrap();
fs::create_dir_all(expected_lockfile.parent().unwrap()).unwrap();
fs::write(&expected_lockfile, advanced_lock_contents()).unwrap();
assert!(fetcher
.cargo_generate_lockfile(
&Some(expected_lockfile.clone()),
utf8_path(crate_dir.as_ref())
)
.unwrap() | random_line_split |
||
metadata.rs | {
cargo_bin_path: Utf8PathBuf,
}
impl LockfileGenerator for CargoLockfileGenerator {
/// Generate lockfile information from a cargo workspace root
fn generate_lockfile(&self, crate_root_dir: &Utf8Path) -> Result<Lockfile> {
let lockfile_path = crate_root_dir.join("Cargo.lock");
// Generate lockfile
let output = std::process::Command::new(&self.cargo_bin_path)
.arg("generate-lockfile")
.current_dir(crate_root_dir)
.output()
.with_context(|| format!("Generating lockfile in {}", crate_root_dir))?;
if !output.status.success() {
anyhow::bail!(
"Failed to generate lockfile in {}: {}",
crate_root_dir,
String::from_utf8_lossy(&output.stderr)
);
}
// Load lockfile contents
Lockfile::load(&lockfile_path)
.with_context(|| format!("Failed to load lockfile: {}", lockfile_path))
}
}
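// Illustrative usage of the generator above (a sketch added for clarity, not part
// of the original source; the workspace path is hypothetical):
//
//     let generator = CargoLockfileGenerator { cargo_bin_path: cargo_bin_path() };
//     let lockfile = generator.generate_lockfile(Utf8Path::new("/path/to/workspace"))?;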
/// A struct containing all metadata about a project with which to plan generated output files for
#[derive(Debug, Clone)]
pub struct RazeMetadata {
// `cargo metadata` output of the current project
pub metadata: Metadata,
// The absolute path to the current project's cargo workspace root. Note that the workspace
// root in `metadata` will be inside of a temporary directory. For details see:
// https://doc.rust-lang.org/cargo/reference/workspaces.html#root-package
pub cargo_workspace_root: Utf8PathBuf,
// The metadata of a lockfile that was generated as a result of fetching metadata
pub lockfile: Option<Lockfile>,
// A map of all known crates with checksums. Use `checksum_for` to access data from this map.
pub checksums: HashMap<String, String>,
// A map of crates to their enabled general and per-platform features.
pub features: BTreeMap<PackageId, Features>,
}
impl RazeMetadata {
/// Get the checksum of a crate using a unique formatter.
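///
/// Example (illustrative; assumes `package_ident` formats keys as
/// `{name}-{version}`, the pattern the tests elsewhere in this file check):
///
/// ```ignore
/// // Looks up the checksum stored under the key "fake-crate-3.3.3".
/// let checksum = raze_metadata.checksum_for("fake-crate", "3.3.3");
/// ```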
pub fn checksum_for(&self, name: &str, version: &str) -> Option<&String> {
self.checksums.get(&package_ident(name, version))
}
}
/// Create a symlink file on unix systems
#[cfg(target_family = "unix")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::unix::fs::symlink(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
/// Create a symlink file on windows systems
#[cfg(target_family = "windows")]
fn make_symlink(src: &Utf8Path, dest: &Utf8Path) -> Result<()> {
std::os::windows::fs::symlink_file(src, dest)
.with_context(|| "Failed to create symlink for generating metadata")
}
/// A workspace metadata fetcher that uses the Cargo commands to gather information about a Cargo
/// project and its transitive dependencies for planning and rendering of Bazel BUILD files.
pub struct RazeMetadataFetcher {
registry_url: Url,
index_url: Url,
metadata_fetcher: Box<dyn MetadataFetcher>,
lockfile_generator: Box<dyn LockfileGenerator>,
settings: Option<RazeSettings>,
}
impl RazeMetadataFetcher {
pub fn new<P: Into<Utf8PathBuf>>(
cargo_bin_path: P,
registry_url: Url,
index_url: Url,
settings: Option<RazeSettings>,
) -> RazeMetadataFetcher {
let cargo_bin_pathbuf: Utf8PathBuf = cargo_bin_path.into();
RazeMetadataFetcher {
registry_url,
index_url,
metadata_fetcher: Box::new(CargoMetadataFetcher {
cargo_bin_path: cargo_bin_pathbuf.clone(),
}),
lockfile_generator: Box::new(CargoLockfileGenerator {
cargo_bin_path: cargo_bin_pathbuf,
}),
settings,
}
}
pub fn new_with_settings(settings: Option<RazeSettings>) -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
settings,
)
}
/// Reassign the [`crate::metadata::MetadataFetcher`] associated with the Raze Metadata Fetcher
pub fn set_metadata_fetcher(&mut self, fetcher: Box<dyn MetadataFetcher>) {
self.metadata_fetcher = fetcher;
}
/// Reassign the [`crate::metadata::LockfileGenerator`] associated with the current Fetcher
pub fn set_lockfile_generator(&mut self, generator: Box<dyn LockfileGenerator>) {
self.lockfile_generator = generator;
}
/// Symlinks the source code of all workspace members into the temp workspace
fn link_src_to_workspace(&self, no_deps_metadata: &Metadata, temp_dir: &Utf8Path) -> Result<()> {
let crate_member_id_re = match consts::OS {
"windows" => Regex::new(r".+\(path\+file:///(.+)\)")?,
_ => Regex::new(r".+\(path\+file://(.+)\)")?,
};
for member in no_deps_metadata.workspace_members.iter() {
// Get a path to the workspace member directory
let workspace_member_directory = {
let crate_member_id_match = crate_member_id_re
.captures(&member.repr)
.and_then(|cap| cap.get(1));
if crate_member_id_match.is_none() {
continue;
}
// UNWRAP: guarded above
Utf8PathBuf::from(crate_member_id_match.unwrap().as_str())
};
// Sanity check: The assumption is that any crate with an `id` that matches
// the regex pattern above should contain a Cargo.toml file which we can use
// to infer the existence of libraries from relative paths such as
// `src/lib.rs` and `src/main.rs`.
let toml_path = workspace_member_directory.join("Cargo.toml");
if !toml_path.exists() {
return Err(anyhow!(format!(
"The regex pattern `{}` found a path that did not contain a Cargo.toml file: `{}`",
crate_member_id_re.as_str(),
workspace_member_directory
)));
}
// Copy the Cargo.toml files into the temp directory to match the directory structure on disk
let path_diff = diff_paths(
&workspace_member_directory,
&no_deps_metadata.workspace_root,
)
.ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in path diff."))?;
let new_path = temp_dir.join(diff);
fs::create_dir_all(&new_path)?;
fs::copy(
workspace_member_directory.join("Cargo.toml"),
new_path.join("Cargo.toml"),
)?;
// Additionally, symlink everything in some common source directories to ensure specified
// library targets can be relied on and won't prevent fetching metadata
for dir in vec!["bin", "src"].iter() {
let glob_pattern = format!("{}/**/*.rs", workspace_member_directory.join(dir));
for entry in glob(glob_pattern.as_str()).expect("Failed to read glob pattern") {
let path = Utf8PathBuf::from_path_buf(entry?)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory."))?;
// Determine the difference between the workspace root and the current file
let path_diff = diff_paths(&path, &no_deps_metadata.workspace_root).ok_or_else(|| {
anyhow!("All workspace members are expected to be under the workspace root")
})?;
let diff = Utf8PathBuf::from_path_buf(path_diff)
.map_err(|_e| anyhow!("Invalid UTF-8 in source directory path diff."))?;
// Create a matching directory tree for the current file within the temp workspace
let new_path = temp_dir.join(diff.as_path());
if let Some(parent) = new_path.parent() {
fs::create_dir_all(parent)?;
}
make_symlink(&path, &new_path)?;
}
}
}
Ok(())
}
/// Creates a copy workspace in a temporary directory for fetching the metadata of the current workspace
fn make_temp_workspace(&self, cargo_workspace_root: &Utf8Path) -> Result<(TempDir, Utf8PathBuf)> {
let temp_dir = TempDir::new()?;
// First gather metadata without downloading any dependencies so we can identify any path dependencies.
let no_deps_metadata = self
.metadata_fetcher
.fetch_metadata(cargo_workspace_root, /*include_deps=*/ false)?;
// There should be a `Cargo.toml` file in the workspace root
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.toml"),
temp_dir.as_ref().join("Cargo.toml"),
)?;
// Optionally copy over the lock file
if no_deps_metadata.workspace_root.join("Cargo.lock").exists() {
fs::copy(
no_deps_metadata.workspace_root.join("Cargo.lock"),
temp_dir.as_ref().join("Cargo.lock"),
)?;
}
let source_dotcargo = cargo_workspace_root.join(".cargo");
let source_dotcargo_config = source_dotcargo.join("config.toml");
if source_dotcargo_config.exists() {
let destination_dotcargo = temp_dir.path().join(".cargo");
fs::create_dir(&destination_dotcargo)?;
let destination_dotcargo_config = destination_dotcargo.join("config.toml");
fs::copy(&source_dotcargo_config, &destination_dotcargo_config)?;
}
// Copy over the Cargo.toml files of each workspace member
let temp_path = Utf8Path::from_path(temp_dir.as_ref())
.ok_or_else(|| anyhow!("Invalid UTF-8 in temp path."))?;
self.link_src_to_workspace(&no_deps_metadata, temp_path)?;
Ok((temp_dir, no_deps_metadata.workspace_root))
}
/// Download a crate's source code from the current registry url
fn fetch_crate_src(&self, dir: &Utf8Path, name: &str, version: &str) -> Result<Utf8PathBuf> {
// The registry url should only be the host URL with ports. No path
let registry_url = {
let mut r_url = self.registry_url.clone();
r_url.set_path("");
r_url.to_string()
};
// Generate a URL with no path. This allows the path to keep any port information
// associated with it.
let mut url = url::Url::parse(&registry_url)?;
url.set_path("");
log::debug!("Cloning binary dependency: {}", &name);
let mut cloner = cargo_clone::Cloner::new();
cloner
.set_registry_url(url.to_string().trim_end_matches('/'))
.set_out_dir(dir);
cloner.clone(
cargo_clone::CloneMethodKind::Crate,
name,
Some(version),
&Vec::new(),
)?;
let crate_dir = dir.join(package_ident(name, version));
if !crate_dir.exists() {
return Err(anyhow!("Directory does not exist"));
}
Ok(crate_dir)
}
/// Add binary dependencies as workspace members to the given workspace root Cargo.toml file
fn inject_binaries_into_workspace(
&self,
binary_deps: Vec<String>,
root_toml: &Utf8Path,
) -> Result<()> {
// Read the current manifest
let mut manifest = {
let content = fs::read_to_string(root_toml)?;
cargo_toml::Manifest::from_str(content.as_str())?
};
// Parse the current `workspace` section of the manifest if one exists
let mut workspace = match manifest.workspace {
Some(workspace) => workspace,
None => cargo_toml::Workspace::default(),
};
// Add the binary dependencies as workspace members to the `workspace` metadata
for dep in binary_deps.iter() {
workspace.members.push(dep.to_string());
}
// Replace the workspace metadata with the modified metadata
manifest.workspace = Some(workspace);
// Write the metadata back to disk.
// cargo_toml::Manifest cannot be serialized directly.
// see: https://gitlab.com/crates.rs/cargo_toml/-/issues/3
let value = toml::Value::try_from(&manifest)?;
std::fs::write(root_toml, toml::to_string(&value)?)
.with_context(|| format!("Failed to inject workspace metadata to {}", root_toml))
}
/// Look up a crate in a specified crate index to determine its checksum
fn fetch_crate_checksum(&self, name: &str, version: &str) -> Result<String> {
let index_url_is_file = self.index_url.scheme().to_lowercase() == "file";
let crate_index_path = if !index_url_is_file {
crates_index::BareIndex::from_url(self.index_url.as_ref())?
.open_or_clone()?
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
} else {
crates_index::Index::new(self.index_url.path())
.crate_(name)
.ok_or_else(|| anyhow!("Failed to find crate '{}' in index", name))?
};
let (_index, crate_version) = crate_index_path
.versions()
.iter()
.enumerate()
.find(|(_, ver)| ver.version() == version)
.ok_or_else(|| anyhow!("Failed to find version {} for crate {}", version, name))?;
Ok(crate_version.checksum()[..].to_hex())
}
/// Ensures a lockfile is generated for a crate on disk
///
/// Args:
/// - reused_lockfile: An optional lockfile to use for fetching metadata to
/// ensure subsequent metadata fetches return consistent results.
/// - cargo_dir: The directory of the cargo workspace to gather metadata for.
/// Returns:
/// If a new lockfile was generated via the `lockfile_generator`, that
/// Lockfile object is returned. New lockfiles are generated when
/// `reused_lockfile` is not provided.
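///
/// Example (an illustrative sketch, not from the original source; `fetcher`,
/// `dir`, and `reused` are assumed bindings for a `RazeMetadataFetcher`, a
/// workspace `Utf8Path`, and a reusable lockfile path):
///
/// ```ignore
/// // Without a lockfile to reuse, a fresh one is generated and returned.
/// assert!(fetcher.cargo_generate_lockfile(&None, dir)?.is_some());
/// // With a reused lockfile, it is copied into place and `None` is returned.
/// assert!(fetcher.cargo_generate_lockfile(&Some(reused), dir)?.is_none());
/// ```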
fn cargo_generate_lockfile(
&self,
reused_lockfile: &Option<Utf8PathBuf>,
cargo_dir: &Utf8Path,
) -> Result<Option<Lockfile>> {
let lockfile_path = cargo_dir.join("Cargo.lock");
// Use the reusable lockfile if one is provided
if let Some(reused_lockfile) = reused_lockfile {
fs::copy(reused_lockfile, &lockfile_path)?;
return Ok(None);
}
let lockfile = self.lockfile_generator.generate_lockfile(cargo_dir)?;
// Returning the lockfile here signifies that a new lockfile has been created.
Ok(Some(lockfile))
}
/// Gather all information about a Cargo project to use for planning and rendering steps
pub fn fetch_metadata(
&self,
cargo_workspace_root: &Utf8Path,
binary_dep_info: Option<&HashMap<String, cargo_toml::Dependency>>,
reused_lockfile: Option<Utf8PathBuf>,
) -> Result<RazeMetadata> {
let (cargo_dir, cargo_workspace_root) = self.make_temp_workspace(cargo_workspace_root)?;
let utf8_cargo_dir = Utf8Path::from_path(cargo_dir.as_ref())
.ok_or_else(|| anyhow!("Cargo dir has invalid UTF-8 in fetch_metadata."))?;
let cargo_root_toml = utf8_cargo_dir.join("Cargo.toml");
// Gather new lockfile data if any binary dependencies were provided
let mut checksums: HashMap<String, String> = HashMap::new();
if let Some(binary_dep_info) = binary_dep_info {
if !binary_dep_info.is_empty() {
let mut src_dirnames: Vec<String> = Vec::new();
for (name, info) in binary_dep_info.iter() {
let version = info.req();
let src_dir = self.fetch_crate_src(utf8_cargo_dir, name, version)?;
checksums.insert(
package_ident(name, version),
self.fetch_crate_checksum(name, version)?,
);
if let Some(dirname) = src_dir.file_name() {
src_dirnames.push(dirname.to_string());
}
}
self.inject_binaries_into_workspace(src_dirnames, &cargo_root_toml)?;
}
}
let output_lockfile = self.cargo_generate_lockfile(&reused_lockfile, utf8_cargo_dir)?;
// Load checksums from the lockfile
let workspace_toml_lock = cargo_dir.as_ref().join("Cargo.lock");
if workspace_toml_lock.exists() {
let lockfile = Lockfile::load(workspace_toml_lock)?;
for package in &lockfile.packages {
if let Some(checksum) = &package.checksum {
checksums.insert(
package_ident(package.name.as_ref(), &package.version.to_string()),
checksum.to_string(),
);
}
}
}
let metadata = self
.metadata_fetcher
.fetch_metadata(utf8_cargo_dir, /*include_deps=*/ true)?;
// Computed in this function because it is metadata, even though it is not returned by `cargo-metadata`
let platform_features = match self.settings.as_ref() {
Some(settings) => get_per_platform_features(cargo_dir.path(), settings, &metadata.packages)?,
None => BTreeMap::new(),
};
Ok(RazeMetadata {
metadata,
checksums,
cargo_workspace_root,
lockfile: output_lockfile,
features: platform_features,
})
}
}
impl Default for RazeMetadataFetcher {
fn default() -> RazeMetadataFetcher {
RazeMetadataFetcher::new(
cargo_bin_path(),
// UNWRAP: The default is covered by testing and should never return err
Url::parse(DEFAULT_CRATE_REGISTRY_URL).unwrap(),
Url::parse(DEFAULT_CRATE_INDEX_URL).unwrap(),
None,
)
}
}
/// A struct containing information about a binary dependency
pub struct BinaryDependencyInfo {
pub name: String,
pub info: cargo_toml::Dependency,
pub lockfile: Option<Utf8PathBuf>,
}
#[cfg(test)]
pub mod tests {
use anyhow::Context;
use camino::Utf8PathBuf;
use httpmock::MockServer;
use tera::Tera;
use super::*;
use crate::testing::*;
use std::{fs::File, io::Write, str::FromStr};
pub struct DummyCargoMetadataFetcher {
pub metadata_template: Option<String>,
}
impl DummyCargoMetadataFetcher {
fn render_metadata(&self, mock_workspace_path: &Utf8Path) -> Option<Metadata> {
self.metadata_template.as_ref()?;
let dir = TempDir::new().unwrap();
let mut renderer = Tera::new(&format!("{}/*", dir.as_ref().display())).unwrap();
let templates_dir = Utf8PathBuf::from(std::file!())
.parent()
.unwrap()
.join("testing/metadata_templates")
.canonicalize()
.unwrap();
renderer
.add_raw_templates(vec![(
self.metadata_template.as_ref().unwrap(),
fs::read_to_string(templates_dir.join(self.metadata_template.as_ref().unwrap())).unwrap(),
)])
.unwrap();
let mut context = tera::Context::new();
context.insert("mock_workspace", &mock_workspace_path);
context.insert("crate_index_root", "/some/fake/home/path/.cargo");
let content = renderer
.render(self.metadata_template.as_ref().unwrap(), &context)
.unwrap();
Some(serde_json::from_str::<Metadata>(&content).unwrap())
}
}
impl MetadataFetcher for DummyCargoMetadataFetcher {
fn fetch_metadata(&self, working_dir: &Utf8Path, include_deps: bool) -> Result<Metadata> {
// Only use the template if the command is looking to reach out to the internet.
if include_deps {
if let Some(metadata) = self.render_metadata(working_dir) {
return Ok(metadata);
}
}
// Ensure the command is run in `offline` mode and no dependencies are checked.
MetadataCommand::new()
.cargo_path(cargo_bin_path())
.no_deps()
.current_dir(working_dir)
.other_options(vec!["--offline".to_string()])
.exec()
.with_context(|| {
format!(
"Failed to run `{} metadata` with contents:\n{}",
cargo_bin_path(),
fs::read_to_string(working_dir.join("Cargo.toml")).unwrap()
)
})
}
}
pub struct DummyLockfileGenerator {
// Optional lockfile to use for generation
pub lockfile_contents: Option<String>,
}
impl LockfileGenerator for DummyLockfileGenerator {
fn generate_lockfile(&self, _crate_root_dir: &Utf8Path) -> Result<Lockfile> {
match &self.lockfile_contents {
Some(contents) => Lockfile::from_str(contents)
.with_context(|| format!("Failed to load provided lockfile:\n{}", contents)),
None => Lockfile::from_str(basic_lock_contents())
.with_context(|| format!("Failed to load dummy lockfile:\n{}", basic_lock_contents())),
}
}
}
pub fn dummy_raze_metadata_fetcher() -> (RazeMetadataFetcher, MockServer, TempDir) {
let tempdir = TempDir::new().unwrap();
let mock_server = MockServer::start();
let mut fetcher = RazeMetadataFetcher::new(
cargo_bin_path(),
Url::parse(&mock_server.base_url()).unwrap(),
Url::parse(&format!("file://{}", tempdir.as_ref().display())).unwrap(),
None,
);
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: None,
}));
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
(fetcher, mock_server, tempdir)
}
pub fn dummy_raze_metadata() -> RazeMetadata {
let dir = make_basic_workspace();
let (mut fetcher, _server, _index_dir) = dummy_raze_metadata_fetcher();
// Always render basic metadata
fetcher.set_metadata_fetcher(Box::new(DummyCargoMetadataFetcher {
metadata_template: Some(templates::BASIC_METADATA.to_string()),
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap()
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new().unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
let mut fetcher = RazeMetadataFetcher::new_with_settings(None);
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml_contents().as_bytes()).unwrap();
}
// Create Cargo.lock
{
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock_contents().as_bytes()).unwrap();
}
let mut fetcher = RazeMetadataFetcher::default();
fetcher.set_lockfile_generator(Box::new(DummyLockfileGenerator {
lockfile_contents: None,
}));
fetcher
.fetch_metadata(utf8_path(dir.as_ref()), None, None)
.unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() {
let dir = TempDir::new().unwrap();
// Create Cargo.toml
{
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml | CargoLockfileGenerator | identifier_name |
|
main.rs | extern crate gl;
extern crate glfw;
extern crate time;
extern crate point;
use std::sync::mpsc::channel;
use std::thread::spawn;
use std::mem;
use std::ptr;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::fmt::{ Display, Formatter };
use std::fmt;
use std::cmp::{Eq, PartialEq};
use gl::types::*;
use glfw::{Context, Key, OpenGlProfileHint, Window, WindowHint, WindowMode};
use point::Point;
mod gl_util;
mod mandel {
pub const DETAIL : u32 = 128;
pub fn calc(ox:f64, oy:f64) -> u32 {
let mut x = ox;
let mut y = oy;
for i in 0..DETAIL {
let xtemp = x*x - y*y + ox;
y = 2.0*x*y + oy;
x = xtemp;
if x*x + y*y > 4.0 {
return i;
}
}
return DETAIL;
}
}
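// A small sanity check for the escape-time loop above (an added example; the
// expected values follow directly from the algorithm): the origin never escapes,
// so `calc` saturates at DETAIL, while a point far outside the set escapes on
// iteration 0.
#[cfg(test)]
mod mandel_tests {
    #[test]
    fn calc_escape_counts() {
        assert_eq!(super::mandel::calc(0.0, 0.0), super::mandel::DETAIL);
        assert_eq!(super::mandel::calc(2.0, 2.0), 0);
    }
}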
struct TileSpecification {
pixels: Point<i32>,
center: Point<f64>,
zoom: f64,
}
impl PartialEq for TileSpecification {
fn eq(&self, other: &TileSpecification) -> bool {
self.pixels == other.pixels &&
self.center == other.center &&
self.zoom == other.zoom
}
}
impl Eq for TileSpecification {}
struct Tile {
specification: TileSpecification,
colors: Vec<GLfloat>,
positions: Vec<GLfloat>,
}
struct Line {
y: i32,
values: Vec<u32>,
}
struct HumanTimeDuration {
nanoseconds: u64,
}
impl Display for HumanTimeDuration {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
let ns = self.nanoseconds;
match ns {
0 ... 1_000 => fmt.write_fmt(format_args!("{} ns", ns)),
1_000 ... 1_000_000 => fmt.write_fmt(format_args!("{:.*} µs", 2, (ns as f64) / 1_000f64)),
1_000_000 ... 1_000_000_000 => fmt.write_fmt(format_args!("{:.*} ms", 2, (ns as f64) / 1_000_000f64)),
_ => fmt.write_fmt(format_args!("{:.*} s", 2, (ns as f64) / 1_000_000_000f64)),
}
}
}
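// Examples of the formatting above (added illustration; the outputs follow from
// the match arms and the `{:.*}` precision of 2):
//
//     format!("{}", HumanTimeDuration { nanoseconds: 1_500 })     // "1.50 µs"
//     format!("{}", HumanTimeDuration { nanoseconds: 1_500_000 }) // "1.50 ms"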
// TODO: return result with a useful error type
fn load_shader(filename: &str) -> String {
let mut file = File::open(filename)
.ok().unwrap_or_else(|| File::open("src/".to_string()+filename)
.ok().expect(&format!("Could not open shader file {}", filename)));
let mut bytes = Vec::new();
file.read_to_end(&mut bytes).ok().expect(&format!("Failed to read from shader file {}", filename));
String::from_utf8(bytes).ok().expect(&format!("Shader file not UTF-8: {}", filename))
}
fn create_buffer() -> GLuint {
unsafe {
let mut buffer = 0;
gl::GenBuffers(1, &mut buffer);
buffer
}
}
unsafe fn load_vector_in_buffer(buffer: u32, values: &Vec<GLfloat>) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
gl::BufferData(gl::ARRAY_BUFFER,
(values.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&values[0]),
gl::STATIC_DRAW);
}
unsafe fn bind_attribute_to_buffer(program: u32, attribute_name: &str, buffer: u32, components: i32) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let attribute = gl::GetAttribLocation(program, CString::new(attribute_name).unwrap().as_ptr()) as GLuint;
gl::EnableVertexAttribArray(attribute);
gl::VertexAttribPointer(attribute, components, gl::FLOAT, gl::FALSE as GLboolean, 0, ptr::null());
}
fn world_width_from_zoom(zoom: f64) -> f64 {
2f64.powf(zoom)
}
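// Note (added for clarity): `zoom` is the base-2 logarithm of the visible world
// width, so zoom = 2.0 shows 4.0 world units across and each +1.0 step doubles it.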
unsafe fn set_viewport(program: GLuint, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) {
let (world_width, world_height, world_left, _world_top, world_bottom) = get_screen_in_world(zoom, &pixels, &center);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_bottom_left").unwrap().as_ptr()), world_left as f32, world_bottom as f32);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_dimensions" ).unwrap().as_ptr()), world_width as f32, world_height as f32);
}
fn get_screen_in_world(zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> (f64, f64, f64, f64, f64) {
let width = pixels.x as f64;
let height = pixels.y as f64;
let world_width = world_width_from_zoom(zoom);
let world_height = world_width * height / width;
let world_left = center.x - world_width / 2.0;
let world_top = center.y + world_height / 2.0;
let world_bottom = center.y - world_height / 2.0;
(world_width, world_height, world_left, world_top, world_bottom)
}
fn pixel_to_world(pixel_coord: &Point<f64>, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> Point<f64> {
let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
Point {
x: pixel_coord.x / (pixels.x as f64) * world_width + world_left,
y: -pixel_coord.y / (pixels.y as f64) * world_height + world_top,
}
}
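// Worked example (added for clarity, using the formulas above): for a 500x300-pixel
// viewport with zoom = 2.0 and center = (-0.7, 0.0), the world is 4.0 wide and
// 2.4 tall, so pixel (0, 0) maps to (-0.7 - 2.0, 0.0 + 1.2) = (-2.7, 1.2).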
fn calc_mandelbrot(pixels: &Point<i32>, center: &Point<f64>, zoom: f64) -> (Vec<GLfloat>, Vec<GLfloat>) {
let start = time::precise_time_ns();
let mut colors : Vec<GLfloat> = vec![];
let mut positions : Vec<GLfloat> = vec![];
let width = pixels.x as f64;
let height = pixels.y as f64;
let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
let (tx, rx) = channel();
for y_pixel in 0..pixels.y {
let tx = tx.clone();
let x_pixels = pixels.x;
spawn(move || {
let mut line = vec![];
for x_pixel in 0..x_pixels {
let x = (x_pixel as f64) / width * world_width + world_left;
let y = -(y_pixel as f64) / height * world_height + world_top;
let iterations = mandel::calc(x, y);
line.push(iterations);
}
tx.send(Line { y: y_pixel, values: line }).unwrap();
});
}
for _y_pixel in 0..pixels.y {
let line = rx.recv().unwrap();
let mut x_pixel = 0;
for value in line.values {
x_pixel += 1;
let y_pixel = line.y;
positions.push(( (x_pixel as f64) / width * world_width + world_left) as f32);
positions.push((-(y_pixel as f64) / height * world_height + world_top ) as f32);
let color = value as GLfloat / mandel::DETAIL as GLfloat;
colors.push(color);
colors.push(color);
colors.push(color);
}
}
let end = time::precise_time_ns();
println!("Calculated fractal in {}", HumanTimeDuration { nanoseconds: end - start });
(positions, colors)
}
fn draw_fractal(positions : &Vec<GLfloat>, colors : &Vec<GLfloat>, vertex_buffer : GLuint, color_buffer : GLuint, window: &mut Window) {
let points = colors.len() / 3;
unsafe {
load_vector_in_buffer(vertex_buffer, &positions);
load_vector_in_buffer(color_buffer, &colors);
gl::DrawArrays(gl::POINTS, 0, points as i32);
window.swap_buffers();
}
}
fn main() {
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenGlForwardCompat(true));
glfw.window_hint(WindowHint::OpenGlProfile(OpenGlProfileHint::Core));
let x_initial_points = 500;
let y_initial_points = 300;
// since mouse button events don't send mouse positions, we need to store them
let mut mouse = Point::new(0f64, 0f64);
let mut mouse_start_pan = Point::new(0f64, 0f64);
let mut mouse_button_1_pressed = false;
let mut zoom = 2.0;
let mut center = Point::new(-0.7, 0.0);
let (mut window, events) = glfw.create_window(x_initial_points, y_initial_points, "Mandelbrot", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
let mut pixels = {
let (x_pixels, y_pixels) = window.get_framebuffer_size();
Point::new(x_pixels, y_pixels)
};
// on "retina displays" there are two pixels per point, otherwise, it is one
let pixel_size = pixels.x / (x_initial_points as i32);
window.set_key_polling(true);
window.set_framebuffer_size_polling(true);
window.set_scroll_polling(true);
window.set_cursor_pos_polling(true);
window.set_mouse_button_polling(true);
window.make_current();
gl::load_with(|s| window.get_proc_address(s));
let vertex_shader = gl_util::compile_shader(&load_shader("mandel.v.glsl"), gl::VERTEX_SHADER);
let fragment_shader = gl_util::compile_shader(&load_shader("mandel.f.glsl"), gl::FRAGMENT_SHADER);
let program = gl_util::link_program(vertex_shader, fragment_shader);
unsafe {
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
let mut vertex_array = 0;
let vertex_buffer = create_buffer();
let color_buffer = create_buffer();
unsafe {
gl::GenVertexArrays(1, &mut vertex_array);
gl::BindVertexArray(vertex_array);
gl::UseProgram(program);
gl::BindFragDataLocation(program, 0, CString::new("out_color").unwrap().as_ptr());
bind_attribute_to_buffer(program, "position", vertex_buffer, 2);
bind_attribute_to_buffer(program, "color", color_buffer, 3);
}
let mut current_tile : Option<Tile> = None;
let (tx_incoming_order, rx_incoming_order ) = channel();
let (tx_completed_order, rx_completed_order) = channel();
spawn(move || {
loop {
let tile_spec : TileSpecification = rx_incoming_order.recv().unwrap();
let (positions, colors) = calc_mandelbrot(&tile_spec.pixels, &tile_spec.center, tile_spec.zoom);
tx_completed_order.send(Tile { specification: tile_spec, positions: positions, colors: colors }).unwrap();
}
});
let mut tile_queue_empty = true;
while !window.should_close() {
let mut needs_redraw = false;
glfw.poll_events();
for (_, event) in glfw::flush_messages(&events) {
match event {
glfw::WindowEvent::Key(Key::Escape, _, _, _) => {
window.set_should_close(true)
}
glfw::WindowEvent::FramebufferSize(width, height) => {
pixels.x = width;
pixels.y = height;
needs_redraw = true;
}
glfw::WindowEvent::Scroll(_x, y) => {
let old_world = pixel_to_world(&mouse, zoom, &pixels, &center);
zoom += y;
let new_world = pixel_to_world(&mouse, zoom, &pixels, &center);
center = center + old_world - new_world;
needs_redraw = true;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Press, _) => {
mouse_button_1_pressed = true;
mouse_start_pan = mouse;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Release, _) => {
mouse_button_1_pressed = false;
}
glfw::WindowEvent::CursorPos(x, y) => {
mouse.x = x;
mouse.y = y;
if mouse_button_1_pressed {
let world_per_pixel = world_width_from_zoom(zoom) / (pixels.x as f64);
let world_per_point = world_per_pixel * (pixel_size as f64);
let mut mouse_movement = mouse - mouse_start_pan;
mouse_movement.y = -mouse_movement.y;
center = center - mouse_movement * world_per_point;
mouse_start_pan = mouse;
needs_redraw = true;
}
}
e => { println!("Unhandled event: {:?}", e); }
}
}
match rx_completed_order.try_recv() {
Ok(tile) => {
current_tile = Some(tile);
tile_queue_empty = true;
needs_redraw = true;
},
_ => {
// TODO: Handle disconnect
}
}
if needs_redraw { |
let new_tile_spec = TileSpecification { pixels: pixels, center: center, zoom: zoom };
let needs_new_tile = match current_tile {
None => true,
Some(ref tile) => {
tile.specification != new_tile_spec
},
};
if tile_queue_empty && needs_new_tile {
tx_incoming_order.send(new_tile_spec).unwrap();
tile_queue_empty = false;
}
}
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fragment_shader);
gl::DeleteShader(vertex_shader);
gl::DeleteBuffers(1, &color_buffer);
gl::DeleteBuffers(1, &vertex_buffer);
gl::DeleteVertexArrays(1, &vertex_array);
}
}
|
unsafe {
gl::ClearColor(0.2, 0.1, 0.05, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
unsafe { set_viewport(program, zoom, &pixels, &center) };
match current_tile {
Some(ref tile) => {
draw_fractal(&tile.positions, &tile.colors, vertex_buffer, color_buffer, &mut window);
}
None => { /* no tile ready yet */ }
}
}
| conditional_block |
main.rs | extern crate gl;
extern crate glfw;
extern crate time;
extern crate point;
use std::sync::mpsc::channel;
use std::thread::spawn;
use std::mem;
use std::ptr;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::fmt::{ Display, Formatter };
use std::fmt;
use std::cmp::{Eq, PartialEq};
use gl::types::*;
use glfw::{Context, Key, OpenGlProfileHint, Window, WindowHint, WindowMode};
use point::Point;
mod gl_util;
mod mandel {
pub const DETAIL : u32 = 128;
pub fn calc(ox:f64, oy:f64) -> u32 {
let mut x = ox;
let mut y = oy;
for i in 0..DETAIL {
let xtemp = x*x - y*y + ox;
y = 2.0*x*y + oy;
x = xtemp;
if x*x + y*y > 4.0 {
return i;
}
}
return DETAIL;
}
}
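// A small sanity check for the escape-time loop above (an added example; the
// expected values follow directly from the algorithm): the origin never escapes,
// so `calc` saturates at DETAIL, while a point far outside the set escapes on
// iteration 0.
#[cfg(test)]
mod mandel_tests {
    #[test]
    fn calc_escape_counts() {
        assert_eq!(super::mandel::calc(0.0, 0.0), super::mandel::DETAIL);
        assert_eq!(super::mandel::calc(2.0, 2.0), 0);
    }
}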
struct TileSpecification {
pixels: Point<i32>,
center: Point<f64>,
zoom: f64,
}
impl PartialEq for TileSpecification {
fn eq(&self, other: &TileSpecification) -> bool {
self.pixels == other.pixels &&
self.center == other.center &&
self.zoom == other.zoom
}
}
impl Eq for TileSpecification {}
struct Tile {
specification: TileSpecification,
colors: Vec<GLfloat>,
positions: Vec<GLfloat>,
}
struct Line {
y: i32,
values: Vec<u32>,
}
struct HumanTimeDuration {
nanoseconds: u64,
}
impl Display for HumanTimeDuration {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
let ns = self.nanoseconds;
match ns {
0 ... 1_000 => fmt.write_fmt(format_args!("{} ns", ns)),
1_000 ... 1_000_000 => fmt.write_fmt(format_args!("{:.*} µs", 2, (ns as f64) / 1_000f64)),
1_000_000 ... 1_000_000_000 => fmt.write_fmt(format_args!("{:.*} ms", 2, (ns as f64) / 1_000_000f64)),
_ => fmt.write_fmt(format_args!("{:.*} s", 2, (ns as f64) / 1_000_000_000f64)),
}
}
}
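// Examples of the formatting above (added illustration; the outputs follow from
// the match arms and the `{:.*}` precision of 2):
//
//     format!("{}", HumanTimeDuration { nanoseconds: 1_500 })     // "1.50 µs"
//     format!("{}", HumanTimeDuration { nanoseconds: 1_500_000 }) // "1.50 ms"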
// TODO: return result with a useful error type
fn load_shader(filename: &str) -> String {
let mut file = File::open(filename)
.ok().unwrap_or_else(|| File::open("src/".to_string()+filename)
.ok().expect(&format!("Could not open shader file {}", filename)));
let mut bytes = Vec::new();
file.read_to_end(&mut bytes).ok().expect(&format!("Failed to read from shader file {}", filename));
String::from_utf8(bytes).ok().expect(&format!("Shader file not UTF-8: {}", filename))
}
fn create_buffer() -> GLuint {
unsafe {
let mut buffer = 0;
gl::GenBuffers(1, &mut buffer);
buffer
}
}
unsafe fn load_vector_in_buffer(buffer: u32, values: &Vec<GLfloat>) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
gl::BufferData(gl::ARRAY_BUFFER,
(values.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&values[0]),
gl::STATIC_DRAW);
}
unsafe fn bind_attribute_to_buffer(program: u32, attribute_name: &str, buffer: u32, components: i32) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let attribute = gl::GetAttribLocation(program, CString::new(attribute_name).unwrap().as_ptr()) as GLuint;
gl::EnableVertexAttribArray(attribute);
gl::VertexAttribPointer(attribute, components, gl::FLOAT, gl::FALSE as GLboolean, 0, ptr::null());
}
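// `zoom` is the base-2 logarithm of the visible world width, so each whole
// scroll step doubles or halves how much of the plane is shown.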
fn world_width_from_zoom(zoom: f64) -> f64 {
2f64.powf(zoom)
}
unsafe fn set_viewport(program: GLuint, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) {
    let (world_width, world_height, world_left, _world_top, world_bottom) = get_screen_in_world(zoom, &pixels, &center);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_bottom_left").unwrap().as_ptr()), world_left as f32, world_bottom as f32);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_dimensions" ).unwrap().as_ptr()), world_width as f32, world_height as f32);
}
fn get_screen_in_world(zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> (f64, f64, f64, f64, f64) {
let width = pixels.x as f64;
let height = pixels.y as f64;
let world_width = world_width_from_zoom(zoom);
let world_height = world_width * height / width;
let world_left = center.x - world_width / 2.0;
let world_top = center.y + world_height / 2.0;
let world_bottom = center.y - world_height / 2.0;
(world_width, world_height, world_left, world_top, world_bottom)
}
fn pixel_to_world(pixel_coord: &Point<f64>, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> Point<f64> {
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
Point {
x: pixel_coord.x / (pixels.x as f64) * world_width + world_left,
y: -pixel_coord.y / (pixels.y as f64) * world_height + world_top,
}
}
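// One spawned thread per scanline: each row sends its iteration counts back
// over a channel, and the collection loop rebuilds positions from the row
// index (`line.y`), so rows may arrive in any order.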
fn calc_mandelbrot(pixels: &Point<i32>, center: &Point<f64>, zoom: f64) -> (Vec<GLfloat>, Vec<GLfloat>) {
let start = time::precise_time_ns();
let mut colors : Vec<GLfloat> = vec![];
let mut positions : Vec<GLfloat> = vec![];
let width = pixels.x as f64;
let height = pixels.y as f64;
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
let (tx, rx) = channel();
for y_pixel in 0..pixels.y {
let tx = tx.clone();
let x_pixels = pixels.x;
spawn(move || {
let mut line = vec![];
for x_pixel in 0..x_pixels {
let x = (x_pixel as f64) / width * world_width + world_left;
let y = -(y_pixel as f64) / height * world_height + world_top;
let iterations = mandel::calc(x, y);
line.push(iterations);
}
tx.send(Line { y: y_pixel, values: line }).unwrap();
});
}
for _y_pixel in 0..pixels.y {
let line = rx.recv().unwrap();
        // enumerate yields the 0-based pixel column, matching the x
        // coordinate the worker threads used when computing each value.
        for (x_pixel, value) in line.values.into_iter().enumerate() {
let y_pixel = line.y;
positions.push(( (x_pixel as f64) / width * world_width + world_left) as f32);
positions.push((-(y_pixel as f64) / height * world_height + world_top ) as f32);
let color = value as GLfloat / mandel::DETAIL as GLfloat;
colors.push(color);
colors.push(color);
colors.push(color);
}
}
let end = time::precise_time_ns();
println!("Calculated fractal in {}", HumanTimeDuration { nanoseconds: end - start });
(positions, colors)
}
fn draw_fractal(positions : &Vec<GLfloat>, colors : &Vec<GLfloat>, vertex_buffer : GLuint, color_buffer : GLuint, window: &mut Window) {
let points = colors.len() / 3;
unsafe {
load_vector_in_buffer(vertex_buffer, &positions);
load_vector_in_buffer(color_buffer, &colors);
gl::DrawArrays(gl::POINTS, 0, points as i32);
window.swap_buffers();
}
}
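// Each pixel is drawn as an individual GL_POINT, and both vertex buffers are
// re-uploaded on every draw; simple, at the cost of some redundant transfers.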
fn main() {
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenGlForwardCompat(true));
glfw.window_hint(WindowHint::OpenGlProfile(OpenGlProfileHint::Core));
let x_initial_points = 500;
let y_initial_points = 300;
// since mouse button events don't send mouse positions, we need to store them
let mut mouse = Point::new(0f64, 0f64);
let mut mouse_start_pan = Point::new(0f64, 0f64);
let mut mouse_button_1_pressed = false;
let mut zoom = 2.0;
let mut center = Point::new(-0.7, 0.0);
let (mut window, events) = glfw.create_window(x_initial_points, y_initial_points, "Mandelbrot", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
let mut pixels = {
let (x_pixels, y_pixels) = window.get_framebuffer_size();
Point::new(x_pixels, y_pixels)
};
// on "retina displays" there are two pixels per point, otherwise, it is one
let pixel_size = pixels.x / (x_initial_points as i32);
window.set_key_polling(true);
window.set_framebuffer_size_polling(true);
window.set_scroll_polling(true);
window.set_cursor_pos_polling(true);
window.set_mouse_button_polling(true);
window.make_current();
gl::load_with(|s| window.get_proc_address(s));
let vertex_shader = gl_util::compile_shader(&load_shader("mandel.v.glsl"), gl::VERTEX_SHADER);
let fragment_shader = gl_util::compile_shader(&load_shader("mandel.f.glsl"), gl::FRAGMENT_SHADER);
let program = gl_util::link_program(vertex_shader, fragment_shader);
unsafe {
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
let mut vertex_array = 0;
let vertex_buffer = create_buffer();
let color_buffer = create_buffer();
unsafe {
gl::GenVertexArrays(1, &mut vertex_array);
gl::BindVertexArray(vertex_array);
gl::UseProgram(program);
gl::BindFragDataLocation(program, 0, CString::new("out_color").unwrap().as_ptr());
bind_attribute_to_buffer(program, "position", vertex_buffer, 2);
bind_attribute_to_buffer(program, "color", color_buffer, 3);
}
let mut current_tile : Option<Tile> = None;
let (tx_incoming_order, rx_incoming_order ) = channel();
let (tx_completed_order, rx_completed_order) = channel();
spawn(move || {
loop {
let tile_spec : TileSpecification = rx_incoming_order.recv().unwrap();
let (positions, colors) = calc_mandelbrot(&tile_spec.pixels, &tile_spec.center, tile_spec.zoom);
tx_completed_order.send(Tile { specification: tile_spec, positions: positions, colors: colors }).unwrap();
}
});
let mut tile_queue_empty = true;
    while !window.should_close() {
let mut needs_redraw = false;
glfw.poll_events();
for (_, event) in glfw::flush_messages(&events) {
match event {
glfw::WindowEvent::Key(Key::Escape, _, _, _) => {
window.set_should_close(true)
}
glfw::WindowEvent::FramebufferSize(width, height) => {
pixels.x = width;
pixels.y = height;
needs_redraw = true;
}
glfw::WindowEvent::Scroll(_x, y) => {
                    let old_world = pixel_to_world(&mouse, zoom, &pixels, &center);
zoom += y;
                    let new_world = pixel_to_world(&mouse, zoom, &pixels, &center);
center = center + old_world - new_world;
needs_redraw = true;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Press, _) => {
mouse_button_1_pressed = true;
mouse_start_pan = mouse;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Release, _) => {
mouse_button_1_pressed = false;
}
glfw::WindowEvent::CursorPos(x, y) => {
mouse.x = x;
mouse.y = y;
if mouse_button_1_pressed {
let world_per_pixel = world_width_from_zoom(zoom) / (pixels.x as f64);
let world_per_point = world_per_pixel * (pixel_size as f64);
let mut mouse_movement = mouse - mouse_start_pan;
mouse_movement.y = -mouse_movement.y;
center = center - mouse_movement * world_per_point;
mouse_start_pan = mouse;
needs_redraw = true;
}
}
e => { println!("Unhandled event: {:?}", e); }
}
}
match rx_completed_order.try_recv() {
Ok(tile) => {
current_tile = Some(tile);
tile_queue_empty = true;
needs_redraw = true;
},
_ => {
// TODO: Handle disconnect
}
}
if needs_redraw {
unsafe {
gl::ClearColor(0.2, 0.1, 0.05, 1.0); |
            unsafe { set_viewport(program, zoom, &pixels, &center) };
match current_tile {
Some(ref tile) => {
draw_fractal(&tile.positions, &tile.colors, vertex_buffer, color_buffer, &mut window);
}
None => { /* no tile ready yet */ }
}
}
let new_tile_spec = TileSpecification { pixels: pixels, center: center, zoom: zoom };
let needs_new_tile = match current_tile {
None => true,
Some(ref tile) => {
                tile.specification != new_tile_spec
},
};
if tile_queue_empty && needs_new_tile {
tx_incoming_order.send(new_tile_spec).unwrap();
tile_queue_empty = false;
}
}
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fragment_shader);
gl::DeleteShader(vertex_shader);
gl::DeleteBuffers(1, &color_buffer);
gl::DeleteBuffers(1, &vertex_buffer);
gl::DeleteVertexArrays(1, &vertex_array);
}
} | gl::Clear(gl::COLOR_BUFFER_BIT);
} | random_line_split |
main.rs | extern crate gl;
extern crate glfw;
extern crate time;
extern crate point;
use std::sync::mpsc::channel;
use std::thread::spawn;
use std::mem;
use std::ptr;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::fmt::{ Display, Formatter };
use std::fmt;
use std::cmp::{Eq, PartialEq};
use gl::types::*;
use glfw::{Context, Key, OpenGlProfileHint, Window, WindowHint, WindowMode};
use point::Point;
mod gl_util;
mod mandel {
pub const DETAIL : u32 = 128;
pub fn calc(ox:f64, oy:f64) -> u32 {
let mut x = ox;
let mut y = oy;
for i in 0..DETAIL {
let xtemp = x*x - y*y + ox;
y = 2.0*x*y + oy;
x = xtemp;
if x*x + y*y > 4.0 {
return i;
}
}
return DETAIL;
}
}
struct TileSpecification {
pixels: Point<i32>,
center: Point<f64>,
zoom: f64,
}
impl PartialEq for TileSpecification {
fn eq(&self, other: &TileSpecification) -> bool {
self.pixels == other.pixels &&
self.center == other.center &&
self.zoom == other.zoom
}
}
impl Eq for TileSpecification {}
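// A finished Tile is only valid for the exact (pixels, center, zoom) it was
// rendered with; the render loop compares specifications to decide whether
// to order a fresh tile.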
struct Tile {
specification: TileSpecification,
colors: Vec<GLfloat>,
positions: Vec<GLfloat>,
}
struct Line {
y: i32,
values: Vec<u32>,
}
struct HumanTimeDuration {
nanoseconds: u64,
}
impl Display for HumanTimeDuration {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
let ns = self.nanoseconds;
match ns {
            0 ... 1_000 => fmt.write_fmt(format_args!("{} ns", ns)),
            1_000 ... 1_000_000 => fmt.write_fmt(format_args!("{:.*} µs", 2, (ns as f64) / 1_000f64)),
            1_000_000 ... 1_000_000_000 => fmt.write_fmt(format_args!("{:.*} ms", 2, (ns as f64) / 1_000_000f64)),
_ => fmt.write_fmt(format_args!("{:.*} s", 2, (ns as f64) / 1_000_000_000f64)),
}
}
}
// TODO: return result with a useful error type
fn load_shader(filename: &str) -> String {
let mut file = File::open(filename)
.ok().unwrap_or_else(|| File::open("src/".to_string()+filename)
.ok().expect(&format!("Could not open shader file {}", filename)));
let mut bytes = Vec::new();
file.read_to_end(&mut bytes).ok().expect(&format!("Failed to read from shader file {}", filename));
String::from_utf8(bytes).ok().expect(&format!("Shader file not UTF-8: {}", filename))
}
fn create_buffer() -> GLuint {
unsafe {
let mut buffer = 0;
gl::GenBuffers(1, &mut buffer);
buffer
}
}
unsafe fn load_vector_in_buffer(buffer: u32, values: &Vec<GLfloat>) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
gl::BufferData(gl::ARRAY_BUFFER,
(values.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&values[0]),
gl::STATIC_DRAW);
}
unsafe fn bind_attribute_to_buffer(program: u32, attribute_name: &str, buffer: u32, components: i32) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let attribute = gl::GetAttribLocation(program, CString::new(attribute_name).unwrap().as_ptr()) as GLuint;
gl::EnableVertexAttribArray(attribute);
gl::VertexAttribPointer(attribute, components, gl::FLOAT, gl::FALSE as GLboolean, 0, ptr::null());
}
fn world_width_from_zoom(zoom: f64) -> f64 {
2f64.powf(zoom)
}
unsafe fn set_viewport(program: GLuint, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) {
    let (world_width, world_height, world_left, _world_top, world_bottom) = get_screen_in_world(zoom, &pixels, &center);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_bottom_left").unwrap().as_ptr()), world_left as f32, world_bottom as f32);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_dimensions" ).unwrap().as_ptr()), world_width as f32, world_height as f32);
}
fn get_screen_in_world(zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> (f64, f64, f64, f64, f64) {
let width = pixels.x as f64;
let height = pixels.y as f64;
let world_width = world_width_from_zoom(zoom);
let world_height = world_width * height / width;
let world_left = center.x - world_width / 2.0;
let world_top = center.y + world_height / 2.0;
let world_bottom = center.y - world_height / 2.0;
(world_width, world_height, world_left, world_top, world_bottom)
}
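// For example, zoom = 2 with a 2:1 (width:height) viewport gives
// world_width = 4 and world_height = 2, so center (-0.7, 0.0) shows
// x in [-2.7, 1.3] and y in [-1.0, 1.0].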
fn pixel_to_world(pixel_coord: &Point<f64>, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> Point<f64> {
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
Point {
x: pixel_coord.x / (pixels.x as f64) * world_width + world_left,
y: -pixel_coord.y / (pixels.y as f64) * world_height + world_top,
}
}
fn calc_mandelbrot(pixels: &Point<i32>, center: &Point<f64>, zoom: f64) -> (Vec<GLfloat>, Vec<GLfloat>) {
let start = time::precise_time_ns();
let mut colors : Vec<GLfloat> = vec![];
let mut positions : Vec<GLfloat> = vec![];
let width = pixels.x as f64;
let height = pixels.y as f64;
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
let (tx, rx) = channel();
for y_pixel in 0..pixels.y {
let tx = tx.clone();
let x_pixels = pixels.x;
spawn(move || {
let mut line = vec![];
for x_pixel in 0..x_pixels {
let x = (x_pixel as f64) / width * world_width + world_left;
let y = -(y_pixel as f64) / height * world_height + world_top;
let iterations = mandel::calc(x, y);
line.push(iterations);
}
tx.send(Line { y: y_pixel, values: line }).unwrap();
});
}
for _y_pixel in 0..pixels.y {
let line = rx.recv().unwrap();
        // enumerate yields the 0-based pixel column, matching the x
        // coordinate the worker threads used when computing each value.
        for (x_pixel, value) in line.values.into_iter().enumerate() {
let y_pixel = line.y;
positions.push(( (x_pixel as f64) / width * world_width + world_left) as f32);
positions.push((-(y_pixel as f64) / height * world_height + world_top ) as f32);
let color = value as GLfloat / mandel::DETAIL as GLfloat;
colors.push(color);
colors.push(color);
colors.push(color);
}
}
let end = time::precise_time_ns();
println!("Calculated fractal in {}", HumanTimeDuration { nanoseconds: end - start });
(positions, colors)
}
fn draw_fractal(positions : &Vec<GLfloat>, colors : &Vec<GLfloat>, vertex_buffer : GLuint, color_buffer : GLuint, window: &mut Window) { |
fn main() {
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenGlForwardCompat(true));
glfw.window_hint(WindowHint::OpenGlProfile(OpenGlProfileHint::Core));
let x_initial_points = 500;
let y_initial_points = 300;
// since mouse button events don't send mouse positions, we need to store them
let mut mouse = Point::new(0f64, 0f64);
let mut mouse_start_pan = Point::new(0f64, 0f64);
let mut mouse_button_1_pressed = false;
let mut zoom = 2.0;
let mut center = Point::new(-0.7, 0.0);
let (mut window, events) = glfw.create_window(x_initial_points, y_initial_points, "Mandelbrot", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
let mut pixels = {
let (x_pixels, y_pixels) = window.get_framebuffer_size();
Point::new(x_pixels, y_pixels)
};
// on "retina displays" there are two pixels per point, otherwise, it is one
let pixel_size = pixels.x / (x_initial_points as i32);
window.set_key_polling(true);
window.set_framebuffer_size_polling(true);
window.set_scroll_polling(true);
window.set_cursor_pos_polling(true);
window.set_mouse_button_polling(true);
window.make_current();
gl::load_with(|s| window.get_proc_address(s));
let vertex_shader = gl_util::compile_shader(&load_shader("mandel.v.glsl"), gl::VERTEX_SHADER);
let fragment_shader = gl_util::compile_shader(&load_shader("mandel.f.glsl"), gl::FRAGMENT_SHADER);
let program = gl_util::link_program(vertex_shader, fragment_shader);
unsafe {
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
let mut vertex_array = 0;
let vertex_buffer = create_buffer();
let color_buffer = create_buffer();
unsafe {
gl::GenVertexArrays(1, &mut vertex_array);
gl::BindVertexArray(vertex_array);
gl::UseProgram(program);
gl::BindFragDataLocation(program, 0, CString::new("out_color").unwrap().as_ptr());
bind_attribute_to_buffer(program, "position", vertex_buffer, 2);
bind_attribute_to_buffer(program, "color", color_buffer, 3);
}
let mut current_tile : Option<Tile> = None;
let (tx_incoming_order, rx_incoming_order ) = channel();
let (tx_completed_order, rx_completed_order) = channel();
spawn(move || {
loop {
let tile_spec : TileSpecification = rx_incoming_order.recv().unwrap();
let (positions, colors) = calc_mandelbrot(&tile_spec.pixels, &tile_spec.center, tile_spec.zoom);
tx_completed_order.send(Tile { specification: tile_spec, positions: positions, colors: colors }).unwrap();
}
});
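    // A single worker turns tile orders into finished tiles; the render loop
    // below keeps at most one order in flight, tracked by `tile_queue_empty`.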
let mut tile_queue_empty = true;
    while !window.should_close() {
let mut needs_redraw = false;
glfw.poll_events();
for (_, event) in glfw::flush_messages(&events) {
match event {
glfw::WindowEvent::Key(Key::Escape, _, _, _) => {
window.set_should_close(true)
}
glfw::WindowEvent::FramebufferSize(width, height) => {
pixels.x = width;
pixels.y = height;
needs_redraw = true;
}
glfw::WindowEvent::Scroll(_x, y) => {
                    let old_world = pixel_to_world(&mouse, zoom, &pixels, &center);
zoom += y;
                    let new_world = pixel_to_world(&mouse, zoom, &pixels, &center);
center = center + old_world - new_world;
needs_redraw = true;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Press, _) => {
mouse_button_1_pressed = true;
mouse_start_pan = mouse;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Release, _) => {
mouse_button_1_pressed = false;
}
glfw::WindowEvent::CursorPos(x, y) => {
mouse.x = x;
mouse.y = y;
if mouse_button_1_pressed {
let world_per_pixel = world_width_from_zoom(zoom) / (pixels.x as f64);
let world_per_point = world_per_pixel * (pixel_size as f64);
let mut mouse_movement = mouse - mouse_start_pan;
mouse_movement.y = -mouse_movement.y;
center = center - mouse_movement * world_per_point;
mouse_start_pan = mouse;
needs_redraw = true;
}
}
e => { println!("Unhandled event: {:?}", e); }
}
}
match rx_completed_order.try_recv() {
Ok(tile) => {
current_tile = Some(tile);
tile_queue_empty = true;
needs_redraw = true;
},
_ => {
// TODO: Handle disconnect
}
}
if needs_redraw {
unsafe {
gl::ClearColor(0.2, 0.1, 0.05, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
            unsafe { set_viewport(program, zoom, &pixels, &center) };
match current_tile {
Some(ref tile) => {
draw_fractal(&tile.positions, &tile.colors, vertex_buffer, color_buffer, &mut window);
}
None => { /* no tile ready yet */ }
}
}
let new_tile_spec = TileSpecification { pixels: pixels, center: center, zoom: zoom };
let needs_new_tile = match current_tile {
None => true,
Some(ref tile) => {
                tile.specification != new_tile_spec
},
};
if tile_queue_empty && needs_new_tile {
tx_incoming_order.send(new_tile_spec).unwrap();
tile_queue_empty = false;
}
}
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fragment_shader);
gl::DeleteShader(vertex_shader);
gl::DeleteBuffers(1, &color_buffer);
gl::DeleteBuffers(1, &vertex_buffer);
gl::DeleteVertexArrays(1, &vertex_array);
}
}
|
let points = colors.len() / 3;
unsafe {
load_vector_in_buffer(vertex_buffer, &positions);
load_vector_in_buffer(color_buffer, &colors);
gl::DrawArrays(gl::POINTS, 0, points as i32);
window.swap_buffers();
}
}
| identifier_body |
main.rs | extern crate gl;
extern crate glfw;
extern crate time;
extern crate point;
use std::sync::mpsc::channel;
use std::thread::spawn;
use std::mem;
use std::ptr;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::fmt::{ Display, Formatter };
use std::fmt;
use std::cmp::{Eq, PartialEq};
use gl::types::*;
use glfw::{Context, Key, OpenGlProfileHint, Window, WindowHint, WindowMode};
use point::Point;
mod gl_util;
mod mandel {
pub const DETAIL : u32 = 128;
pub fn calc(ox:f64, oy:f64) -> u32 {
let mut x = ox;
let mut y = oy;
for i in 0..DETAIL {
let xtemp = x*x - y*y + ox;
y = 2.0*x*y + oy;
x = xtemp;
if x*x + y*y > 4.0 {
return i;
}
}
return DETAIL;
}
}
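// Iteration counts are normalized by DETAIL into a gray value in [0, 1]
// when a tile is built, so DETAIL trades render time for edge contrast.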
struct TileSpecification {
pixels: Point<i32>,
center: Point<f64>,
zoom: f64,
}
impl PartialEq for TileSpecification {
fn eq(&self, other: &TileSpecification) -> bool {
self.pixels == other.pixels &&
self.center == other.center &&
self.zoom == other.zoom
}
}
impl Eq for TileSpecification {}
struct Tile {
specification: TileSpecification,
colors: Vec<GLfloat>,
positions: Vec<GLfloat>,
}
struct Line {
y: i32,
values: Vec<u32>,
}
struct HumanTimeDuration {
nanoseconds: u64,
}
impl Display for HumanTimeDuration {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
let ns = self.nanoseconds;
match ns {
            0 ... 1_000 => fmt.write_fmt(format_args!("{} ns", ns)),
            1_000 ... 1_000_000 => fmt.write_fmt(format_args!("{:.*} µs", 2, (ns as f64) / 1_000f64)),
            1_000_000 ... 1_000_000_000 => fmt.write_fmt(format_args!("{:.*} ms", 2, (ns as f64) / 1_000_000f64)),
_ => fmt.write_fmt(format_args!("{:.*} s", 2, (ns as f64) / 1_000_000_000f64)),
}
}
}
// TODO: return result with a useful error type
fn load_shader(filename: &str) -> String {
let mut file = File::open(filename)
.ok().unwrap_or_else(|| File::open("src/".to_string()+filename)
.ok().expect(&format!("Could not open shader file {}", filename)));
let mut bytes = Vec::new();
file.read_to_end(&mut bytes).ok().expect(&format!("Failed to read from shader file {}", filename));
String::from_utf8(bytes).ok().expect(&format!("Shader file not UTF-8: {}", filename))
}
fn create_buffer() -> GLuint {
unsafe {
let mut buffer = 0;
gl::GenBuffers(1, &mut buffer);
buffer
}
}
unsafe fn l | buffer: u32, values: &Vec<GLfloat>) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
gl::BufferData(gl::ARRAY_BUFFER,
(values.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&values[0]),
gl::STATIC_DRAW);
}
unsafe fn bind_attribute_to_buffer(program: u32, attribute_name: &str, buffer: u32, components: i32) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let attribute = gl::GetAttribLocation(program, CString::new(attribute_name).unwrap().as_ptr()) as GLuint;
gl::EnableVertexAttribArray(attribute);
gl::VertexAttribPointer(attribute, components, gl::FLOAT, gl::FALSE as GLboolean, 0, ptr::null());
}
fn world_width_from_zoom(zoom: f64) -> f64 {
2f64.powf(zoom)
}
unsafe fn set_viewport(program: GLuint, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) {
    let (world_width, world_height, world_left, _world_top, world_bottom) = get_screen_in_world(zoom, &pixels, &center);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_bottom_left").unwrap().as_ptr()), world_left as f32, world_bottom as f32);
gl::Uniform2f(gl::GetUniformLocation(program, CString::new("world_dimensions" ).unwrap().as_ptr()), world_width as f32, world_height as f32);
}
fn get_screen_in_world(zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> (f64, f64, f64, f64, f64) {
let width = pixels.x as f64;
let height = pixels.y as f64;
let world_width = world_width_from_zoom(zoom);
let world_height = world_width * height / width;
let world_left = center.x - world_width / 2.0;
let world_top = center.y + world_height / 2.0;
let world_bottom = center.y - world_height / 2.0;
(world_width, world_height, world_left, world_top, world_bottom)
}
fn pixel_to_world(pixel_coord: &Point<f64>, zoom: f64, pixels: &Point<i32>, center: &Point<f64>) -> Point<f64> {
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
Point {
x: pixel_coord.x / (pixels.x as f64) * world_width + world_left,
y: -pixel_coord.y / (pixels.y as f64) * world_height + world_top,
}
}
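// Window y grows downward while world y grows upward, hence the negated y
// term here and the sign flip applied to mouse movement in the pan handler.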
fn calc_mandelbrot(pixels: &Point<i32>, center: &Point<f64>, zoom: f64) -> (Vec<GLfloat>, Vec<GLfloat>) {
let start = time::precise_time_ns();
let mut colors : Vec<GLfloat> = vec![];
let mut positions : Vec<GLfloat> = vec![];
let width = pixels.x as f64;
let height = pixels.y as f64;
    let (world_width, world_height, world_left, world_top, _world_bottom) = get_screen_in_world(zoom, &pixels, &center);
let (tx, rx) = channel();
for y_pixel in 0..pixels.y {
let tx = tx.clone();
let x_pixels = pixels.x;
spawn(move || {
let mut line = vec![];
for x_pixel in 0..x_pixels {
let x = (x_pixel as f64) / width * world_width + world_left;
let y = -(y_pixel as f64) / height * world_height + world_top;
let iterations = mandel::calc(x, y);
line.push(iterations);
}
tx.send(Line { y: y_pixel, values: line }).unwrap();
});
}
for _y_pixel in 0..pixels.y {
let line = rx.recv().unwrap();
        // enumerate yields the 0-based pixel column, matching the x
        // coordinate the worker threads used when computing each value.
        for (x_pixel, value) in line.values.into_iter().enumerate() {
let y_pixel = line.y;
positions.push(( (x_pixel as f64) / width * world_width + world_left) as f32);
positions.push((-(y_pixel as f64) / height * world_height + world_top ) as f32);
let color = value as GLfloat / mandel::DETAIL as GLfloat;
colors.push(color);
colors.push(color);
colors.push(color);
}
}
let end = time::precise_time_ns();
println!("Calculated fractal in {}", HumanTimeDuration { nanoseconds: end - start });
(positions, colors)
}
fn draw_fractal(positions : &Vec<GLfloat>, colors : &Vec<GLfloat>, vertex_buffer : GLuint, color_buffer : GLuint, window: &mut Window) {
let points = colors.len() / 3;
unsafe {
load_vector_in_buffer(vertex_buffer, &positions);
load_vector_in_buffer(color_buffer, &colors);
gl::DrawArrays(gl::POINTS, 0, points as i32);
window.swap_buffers();
}
}
fn main() {
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenGlForwardCompat(true));
glfw.window_hint(WindowHint::OpenGlProfile(OpenGlProfileHint::Core));
let x_initial_points = 500;
let y_initial_points = 300;
// since mouse button events don't send mouse positions, we need to store them
let mut mouse = Point::new(0f64, 0f64);
let mut mouse_start_pan = Point::new(0f64, 0f64);
let mut mouse_button_1_pressed = false;
let mut zoom = 2.0;
let mut center = Point::new(-0.7, 0.0);
let (mut window, events) = glfw.create_window(x_initial_points, y_initial_points, "Mandelbrot", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
let mut pixels = {
let (x_pixels, y_pixels) = window.get_framebuffer_size();
Point::new(x_pixels, y_pixels)
};
// on "retina displays" there are two pixels per point, otherwise, it is one
let pixel_size = pixels.x / (x_initial_points as i32);
window.set_key_polling(true);
window.set_framebuffer_size_polling(true);
window.set_scroll_polling(true);
window.set_cursor_pos_polling(true);
window.set_mouse_button_polling(true);
window.make_current();
gl::load_with(|s| window.get_proc_address(s));
let vertex_shader = gl_util::compile_shader(&load_shader("mandel.v.glsl"), gl::VERTEX_SHADER);
let fragment_shader = gl_util::compile_shader(&load_shader("mandel.f.glsl"), gl::FRAGMENT_SHADER);
let program = gl_util::link_program(vertex_shader, fragment_shader);
unsafe {
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
let mut vertex_array = 0;
let vertex_buffer = create_buffer();
let color_buffer = create_buffer();
unsafe {
gl::GenVertexArrays(1, &mut vertex_array);
gl::BindVertexArray(vertex_array);
gl::UseProgram(program);
gl::BindFragDataLocation(program, 0, CString::new("out_color").unwrap().as_ptr());
bind_attribute_to_buffer(program, "position", vertex_buffer, 2);
bind_attribute_to_buffer(program, "color", color_buffer, 3);
}
let mut current_tile : Option<Tile> = None;
let (tx_incoming_order, rx_incoming_order ) = channel();
let (tx_completed_order, rx_completed_order) = channel();
spawn(move || {
loop {
let tile_spec : TileSpecification = rx_incoming_order.recv().unwrap();
let (positions, colors) = calc_mandelbrot(&tile_spec.pixels, &tile_spec.center, tile_spec.zoom);
tx_completed_order.send(Tile { specification: tile_spec, positions: positions, colors: colors }).unwrap();
}
});
let mut tile_queue_empty = true;
    while !window.should_close() {
let mut needs_redraw = false;
glfw.poll_events();
for (_, event) in glfw::flush_messages(&events) {
match event {
glfw::WindowEvent::Key(Key::Escape, _, _, _) => {
window.set_should_close(true)
}
glfw::WindowEvent::FramebufferSize(width, height) => {
pixels.x = width;
pixels.y = height;
needs_redraw = true;
}
glfw::WindowEvent::Scroll(_x, y) => {
                    let old_world = pixel_to_world(&mouse, zoom, &pixels, &center);
zoom += y;
                    let new_world = pixel_to_world(&mouse, zoom, &pixels, &center);
center = center + old_world - new_world;
needs_redraw = true;
}
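                // The Scroll arm above implements zoom-to-cursor: `center` is
                // shifted by the world-space displacement of the point under
                // the mouse, so that point stays fixed while zooming.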
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Press, _) => {
mouse_button_1_pressed = true;
mouse_start_pan = mouse;
}
glfw::WindowEvent::MouseButton(glfw::MouseButton::Button1, glfw::Action::Release, _) => {
mouse_button_1_pressed = false;
}
glfw::WindowEvent::CursorPos(x, y) => {
mouse.x = x;
mouse.y = y;
if mouse_button_1_pressed {
let world_per_pixel = world_width_from_zoom(zoom) / (pixels.x as f64);
let world_per_point = world_per_pixel * (pixel_size as f64);
let mut mouse_movement = mouse - mouse_start_pan;
mouse_movement.y = -mouse_movement.y;
center = center - mouse_movement * world_per_point;
mouse_start_pan = mouse;
needs_redraw = true;
}
}
e => { println!("Unhandled event: {:?}", e); }
}
}
match rx_completed_order.try_recv() {
Ok(tile) => {
current_tile = Some(tile);
tile_queue_empty = true;
needs_redraw = true;
},
_ => {
// TODO: Handle disconnect
}
}
if needs_redraw {
unsafe {
gl::ClearColor(0.2, 0.1, 0.05, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
            unsafe { set_viewport(program, zoom, &pixels, &center) };
match current_tile {
Some(ref tile) => {
draw_fractal(&tile.positions, &tile.colors, vertex_buffer, color_buffer, &mut window);
}
None => { /* no tile ready yet */ }
}
}
let new_tile_spec = TileSpecification { pixels: pixels, center: center, zoom: zoom };
let needs_new_tile = match current_tile {
None => true,
Some(ref tile) => {
                tile.specification != new_tile_spec
},
};
if tile_queue_empty && needs_new_tile {
tx_incoming_order.send(new_tile_spec).unwrap();
tile_queue_empty = false;
}
}
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fragment_shader);
gl::DeleteShader(vertex_shader);
gl::DeleteBuffers(1, &color_buffer);
gl::DeleteBuffers(1, &vertex_buffer);
gl::DeleteVertexArrays(1, &vertex_array);
}
}
| oad_vector_in_buffer( | identifier_name |
model.rs | use std::collections::{BTreeMap, HashMap};
use task::*;
use task_ref::TaskRef;
use std::io;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Effect {
AddTask(Task),
ChangeTaskTags {
uuid: Uuid,
added: Tags,
removed: Tags,
},
ChangeTaskState(Uuid, TaskState),
ChangeTaskPriority(Uuid, Priority),
DeleteTask(Uuid),
// Undo,
}
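// Effects form an append-only event log: `Model::from_effects` rebuilds the
// complete task state by replaying the log in order.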
impl Effect {
fn task_id<'a>(&'a self) -> &'a Uuid {
use Effect::*;
match *self {
AddTask(Task { ref uuid,.. }) => uuid,
ChangeTaskTags { ref uuid,.. } => uuid,
ChangeTaskState(ref u, _) => u,
ChangeTaskPriority(ref u, _) => u,
DeleteTask(ref u) => u,
}
}
pub fn print(&self, model: &Model, out: &mut io::Write) -> io::Result<()> {
use Effect::*;
// Special handling for `DeleteTask` as `task` isn't in
// `model` anymore
if let DeleteTask(_) = self {
writeln!(out, "Deleted task {}", self.task_id())?;
} else {
let task = model.get_task(self.task_id()).unwrap(); // TODO
match self {
DeleteTask(_) => unreachable!(),
AddTask(_) => writeln!(out, "Added Task {}", task.short_id())?,
ChangeTaskTags {
ref added,
ref removed,
..
} => {
                    if !added.is_empty() {
writeln!(out, "Added tags {:?}", added)?;
}
                    if !removed.is_empty() {
writeln!(out, "Removed tags {:?}", removed)?;
}
}
ChangeTaskState(_uuid, ref state) => match *state {
TaskState::Done(_) => writeln!(out, "Marking task '{}' as done", task.description)?,
TaskState::Open => writeln!(out, "Marking task '{}' as open", task.description)?,
TaskState::Canceled(_) => {
writeln!(out, "Marking task '{}' as canceled", task.description)?
}
},
ChangeTaskPriority(_uuid, ref priority) => {
writeln!(
out,
"Changed priority of task '{}' to {}",
task.description, priority
)?;
}
};
}
Ok(())
}
}
pub type ScopeName = String;
pub type NumericalIds = HashMap<ScopeName, BTreeMap<u64, Uuid>>;
pub struct Model {
// TODO: hide `tasks` and add `archived_tasks`
pub tasks: HashMap<Uuid, Task>,
pub applied_effects: Vec<Effect>,
pub numerical_ids: NumericalIds,
is_dirty: bool,
}
impl Model {
pub fn new() -> Self {
Model {
tasks: HashMap::new(),
applied_effects: Vec::new(),
numerical_ids: NumericalIds::new(),
is_dirty: false,
}
}
pub fn from_effects(effects: &[Effect]) -> Self {
let mut model = Self::new();
for effect in effects {
model.apply_effect(&effect)
}
model.is_dirty = false;
model
}
pub fn apply_effect(&mut self, effect: &Effect) -> () {
use Effect::*;
match effect.clone() {
AddTask(task) => {
self.add_task(task);
}
ChangeTaskTags {
uuid,
added,
removed,
} => {
self.change_task_tags(&uuid, added, removed);
}
ChangeTaskState(uuid, state) => {
self.change_task_state(&uuid, state);
}
ChangeTaskPriority(uuid, p) => {
self.change_task_priority(&uuid, p);
}
DeleteTask(uuid) => {
self.delete_task(&uuid);
}
}
self.applied_effects.push(effect.clone());
self.is_dirty = true;
}
fn add_task(&mut self, t: Task) -> () {
if self.tasks.insert(t.uuid, t).is_some() {
panic!("UUID collision in Model::add_task");
}
}
fn delete_task(&mut self, u: &Uuid) -> Option<Task> {
self.tasks.remove(&u)
}
fn change_task_state(&mut self, u: &Uuid, state: TaskState) {
self.tasks.get_mut(u).expect("failed to get task").status = state;
}
fn change_task_priority(&mut self, u: &Uuid, priority: Priority) {
self.tasks.get_mut(u).expect("failed to get task").priority = priority;
}
fn change_task_tags(&mut self, u: &Uuid, added: Tags, removed: Tags) {
let ref mut tags = self.tasks.get_mut(u).expect("failed to get task").tags;
for t in removed {
tags.remove(&t);
}
for t in added {
tags.insert(t);
}
}
}
// Numerical-ID Handling
impl Model {
pub fn short_task_id(&self, scope_name: &str, task_id: &Uuid) -> Option<u64> {
self.numerical_ids
.get(scope_name)
.and_then(|ids| ids.iter().find(|&(_, uuid)| uuid == task_id))
.map(|(n, _)| *n)
}
pub fn recalculate_numerical_ids(&mut self, scope: &str, task_ids: &[Uuid]) {
info!("Recalculating numerical-ids for scope {}", scope);
self.is_dirty = true;
let ids = task_ids
.iter()
.enumerate()
.map(|(n, uuid)| ((n as u64) + 1, uuid.clone()))
.collect();
self.numerical_ids.insert(scope.into(), ids);
}
pub fn incremental_numerical_id(&mut self, scope: &str, task: &Uuid) -> u64 {
debug!(
"Calculating incremental numerical-id for {} in scope {}",
task, scope
);
assert!(self.get_task(task).is_some());
self.short_task_id(scope, task).unwrap_or_else(|| {
self.is_dirty = true;
let numerical_ids = self.numerical_ids
.entry(scope.into())
.or_insert(BTreeMap::new());
let n = numerical_ids.iter().map(|(id, _)| *id).max().unwrap_or(0) + 1;
numerical_ids.insert(n, task.clone());
n
})
}
}
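// Not in the original source: a minimal sketch of replaying an effect log
// into a fresh Model, mirroring the import style of the tests below.
#[cfg(test)]
mod replay_sketch {
    use super::*;
    use {Priority, Task};
    #[test]
    fn replay_effect_log() {
        let t = Task::new("write docs");
        let log = vec![
            Effect::AddTask(t.clone()),
            Effect::ChangeTaskPriority(t.uuid, Priority::High),
        ];
        let m = Model::from_effects(&log);
        assert_eq!(m.tasks[&t.uuid].priority, Priority::High);
        assert!(!m.is_dirty());
    }
}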
#[derive(Debug, PartialEq, Eq, Fail)]
pub enum FindTaskError {
#[fail(display = "Couldn't find task")]
TaskNotFound,
#[fail(display = "Found multiple tasks")]
MultipleResults,
}
pub struct TaskIter<'a> {
tasks: Vec<&'a Task>,
pos: usize,
}
impl<'a> Iterator for TaskIter<'a> {
type Item = &'a Task;
fn next(&mut self) -> Option<Self::Item> {
let v = self.tasks.get(self.pos);
self.pos += 1;
v.map(|x| *x)
}
}
impl Model {
pub fn all_tasks<'a>(&'a self) -> TaskIter<'a> {
let mut v: Vec<&Task> = self.tasks.values().collect();
v.sort_by(|a, b| b.cmp(a));
TaskIter { tasks: v, pos: 0 }
}
pub fn get_task<'a>(&'a self, uuid: &Uuid) -> Option<&'a Task> {
self.tasks.get(uuid)
}
pub fn find_task<'a>(
&'a self,
scope_name: &str,
task_ref: &TaskRef,
) -> Result<&'a Task, FindTaskError> {
let uuids: Vec<&Uuid> = match *task_ref {
TaskRef::FullUUID(ref u) => vec![u],
TaskRef::ShortUUID(ref s) => self.tasks
.keys()
.filter(|uuid| uuid.simple().to_string().starts_with(s))
.collect(),
TaskRef::Numerical(ref n) => {
match self.numerical_ids.get(scope_name).and_then(|x| x.get(n)) {
Some(uuid) => vec![uuid],
None => vec![],
}
}
};
use self::FindTaskError::*;
match uuids.len() {
0 => Err(TaskNotFound),
1 => self.get_task(uuids[0])
.map_or(Err(FindTaskError::TaskNotFound), Ok),
_ => Err(MultipleResults),
}
}
pub fn is_dirty(&self) -> bool {
self.is_dirty
}
}
#[cfg(test)]
mod tests {
use super::*;
use chrono;
use std::str::FromStr;
use uuid::Uuid;
use {Priority, Task, TaskState};
#[test]
fn test_add_delete_task() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
assert_eq!(m.get_task(&t.uuid), Some(&t));
assert_eq!(m.delete_task(&t.uuid), Some(t.clone()));
assert_eq!(m.get_task(&t.uuid), None);
}
#[test]
fn test_change_task_task() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].status, TaskState::Open);
let s = TaskState::Done(chrono::Utc::now());
m.change_task_state(&uuid, s);
assert_eq!(m.tasks[&uuid].status, s);
}
#[test]
fn test_change_task_priority() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].priority, Priority::Default);
m.change_task_priority(&uuid, Priority::High);
assert_eq!(m.tasks[&uuid].priority, Priority::High);
}
#[test]
fn test_numerical_ref() {
assert_eq!(TaskRef::from_str("42"), Ok(TaskRef::Numerical(42)));
assert_eq!(TaskRef::from_str("0"), Ok(TaskRef::Numerical(0)));
assert!(TaskRef::from_str("-0").is_err());
}
| fn test_short_uuid_ref() {
for s in vec!["abcdef", "123abc", "000000"] {
assert_eq!(TaskRef::from_str(s), Ok(TaskRef::ShortUUID(s.into())));
}
assert!(
TaskRef::from_str("abcde").is_err(),
"Short-UUID with len of 5"
);
assert!(
TaskRef::from_str("abcdef1").is_err(),
"Short-UUID with len of 7"
);
// Make sure that short-UUIDs are preferred
assert_eq!(
TaskRef::from_str("123456"),
Ok(TaskRef::ShortUUID("123456".into()))
);
// non-base16 symbols
assert!(TaskRef::from_str("rivers").is_err());
}
#[test]
fn test_full_uuid_ref() {
for _ in 1..100 {
let uuid = Uuid::new_v4();
assert_eq!(
TaskRef::from_str(&uuid.hyphenated().to_string()),
Ok(TaskRef::FullUUID(uuid))
);
}
}
#[test]
fn test_incremental_numerical_id_empty_scope() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
        assert_eq!(m.incremental_numerical_id("default", &uuid), 1);
}
#[test]
#[should_panic]
fn test_incremental_numerical_id_unknown_task() {
let mut m = Model::new();
m.incremental_numerical_id("default", &Uuid::new_v4());
}
#[test]
fn test_incremental_numerical_id_already_exists() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
assert_eq!(m.incremental_numerical_id("default", &t.uuid), 1);
}
#[test]
fn test_incremental_numerical_id() {
let mut m = Model::new();
let t = Task::new("foo");
let t2 = Task::new("bar");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
m.add_task(t2.clone());
assert_eq!(m.short_task_id("default", &t.uuid), Some(1));
assert_eq!(m.incremental_numerical_id("default", &t2.uuid), 2);
assert_eq!(m.short_task_id("default", &t2.uuid), Some(2));
}
} | #[test] | random_line_split |
model.rs | use std::collections::{BTreeMap, HashMap};
use task::*;
use task_ref::TaskRef;
use std::io;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Effect {
AddTask(Task),
ChangeTaskTags {
uuid: Uuid,
added: Tags,
removed: Tags,
},
ChangeTaskState(Uuid, TaskState),
ChangeTaskPriority(Uuid, Priority),
DeleteTask(Uuid),
// Undo,
}
impl Effect {
fn task_id<'a>(&'a self) -> &'a Uuid {
use Effect::*;
match *self {
AddTask(Task { ref uuid,.. }) => uuid,
ChangeTaskTags { ref uuid,.. } => uuid,
ChangeTaskState(ref u, _) => u,
ChangeTaskPriority(ref u, _) => u,
DeleteTask(ref u) => u,
}
}
pub fn print(&self, model: &Model, out: &mut io::Write) -> io::Result<()> {
use Effect::*;
// Special handling for `DeleteTask` as `task` isn't in
// `model` anymore
if let DeleteTask(_) = self {
writeln!(out, "Deleted task {}", self.task_id())?;
} else {
let task = model.get_task(self.task_id()).unwrap(); // TODO
match self {
DeleteTask(_) => unreachable!(),
AddTask(_) => writeln!(out, "Added Task {}", task.short_id())?,
ChangeTaskTags {
ref added,
ref removed,
..
} => {
                    if !added.is_empty() {
writeln!(out, "Added tags {:?}", added)?;
}
                    if !removed.is_empty() {
writeln!(out, "Removed tags {:?}", removed)?;
}
}
ChangeTaskState(_uuid, ref state) => match *state {
TaskState::Done(_) => writeln!(out, "Marking task '{}' as done", task.description)?,
TaskState::Open => writeln!(out, "Marking task '{}' as open", task.description)?,
TaskState::Canceled(_) => {
writeln!(out, "Marking task '{}' as canceled", task.description)?
}
},
ChangeTaskPriority(_uuid, ref priority) => {
writeln!(
out,
"Changed priority of task '{}' to {}",
task.description, priority
)?;
}
};
}
Ok(())
}
}
pub type ScopeName = String;
pub type NumericalIds = HashMap<ScopeName, BTreeMap<u64, Uuid>>;
pub struct Model {
// TODO: hide `tasks` and add `archived_tasks`
pub tasks: HashMap<Uuid, Task>,
pub applied_effects: Vec<Effect>,
pub numerical_ids: NumericalIds,
is_dirty: bool,
}
impl Model {
pub fn new() -> Self {
Model {
tasks: HashMap::new(),
applied_effects: Vec::new(),
numerical_ids: NumericalIds::new(),
is_dirty: false,
}
}
pub fn from_effects(effects: &[Effect]) -> Self |
pub fn apply_effect(&mut self, effect: &Effect) -> () {
use Effect::*;
match effect.clone() {
AddTask(task) => {
self.add_task(task);
}
ChangeTaskTags {
uuid,
added,
removed,
} => {
self.change_task_tags(&uuid, added, removed);
}
ChangeTaskState(uuid, state) => {
self.change_task_state(&uuid, state);
}
ChangeTaskPriority(uuid, p) => {
self.change_task_priority(&uuid, p);
}
DeleteTask(uuid) => {
self.delete_task(&uuid);
}
}
self.applied_effects.push(effect.clone());
self.is_dirty = true;
}
fn add_task(&mut self, t: Task) -> () {
if self.tasks.insert(t.uuid, t).is_some() {
panic!("UUID collision in Model::add_task");
}
}
fn delete_task(&mut self, u: &Uuid) -> Option<Task> {
self.tasks.remove(&u)
}
fn change_task_state(&mut self, u: &Uuid, state: TaskState) {
self.tasks.get_mut(u).expect("failed to get task").status = state;
}
fn change_task_priority(&mut self, u: &Uuid, priority: Priority) {
self.tasks.get_mut(u).expect("failed to get task").priority = priority;
}
fn change_task_tags(&mut self, u: &Uuid, added: Tags, removed: Tags) {
let ref mut tags = self.tasks.get_mut(u).expect("failed to get task").tags;
for t in removed {
tags.remove(&t);
}
for t in added {
tags.insert(t);
}
}
}
// Numerical-ID Handling
impl Model {
pub fn short_task_id(&self, scope_name: &str, task_id: &Uuid) -> Option<u64> {
self.numerical_ids
.get(scope_name)
.and_then(|ids| ids.iter().find(|&(_, uuid)| uuid == task_id))
.map(|(n, _)| *n)
}
pub fn recalculate_numerical_ids(&mut self, scope: &str, task_ids: &[Uuid]) {
info!("Recalculating numerical-ids for scope {}", scope);
self.is_dirty = true;
let ids = task_ids
.iter()
.enumerate()
.map(|(n, uuid)| ((n as u64) + 1, uuid.clone()))
.collect();
self.numerical_ids.insert(scope.into(), ids);
}
pub fn incremental_numerical_id(&mut self, scope: &str, task: &Uuid) -> u64 {
debug!(
"Calculating incremental numerical-id for {} in scope {}",
task, scope
);
assert!(self.get_task(task).is_some());
self.short_task_id(scope, task).unwrap_or_else(|| {
self.is_dirty = true;
let numerical_ids = self.numerical_ids
.entry(scope.into())
.or_insert(BTreeMap::new());
let n = numerical_ids.iter().map(|(id, _)| *id).max().unwrap_or(0) + 1;
numerical_ids.insert(n, task.clone());
n
})
}
}
#[derive(Debug, PartialEq, Eq, Fail)]
pub enum FindTaskError {
#[fail(display = "Couldn't find task")]
TaskNotFound,
#[fail(display = "Found multiple tasks")]
MultipleResults,
}
pub struct TaskIter<'a> {
tasks: Vec<&'a Task>,
pos: usize,
}
impl<'a> Iterator for TaskIter<'a> {
type Item = &'a Task;
fn next(&mut self) -> Option<Self::Item> {
let v = self.tasks.get(self.pos);
self.pos += 1;
v.map(|x| *x)
}
}
impl Model {
pub fn all_tasks<'a>(&'a self) -> TaskIter<'a> {
let mut v: Vec<&Task> = self.tasks.values().collect();
v.sort_by(|a, b| b.cmp(a));
TaskIter { tasks: v, pos: 0 }
}
pub fn get_task<'a>(&'a self, uuid: &Uuid) -> Option<&'a Task> {
self.tasks.get(uuid)
}
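    // Reference resolution: a full UUID or numerical id names at most one
    // task, but a short-UUID prefix may match several; ambiguity is reported
    // as `MultipleResults` rather than resolved by guessing.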
pub fn find_task<'a>(
&'a self,
scope_name: &str,
task_ref: &TaskRef,
) -> Result<&'a Task, FindTaskError> {
let uuids: Vec<&Uuid> = match *task_ref {
TaskRef::FullUUID(ref u) => vec![u],
TaskRef::ShortUUID(ref s) => self.tasks
.keys()
.filter(|uuid| uuid.simple().to_string().starts_with(s))
.collect(),
TaskRef::Numerical(ref n) => {
match self.numerical_ids.get(scope_name).and_then(|x| x.get(n)) {
Some(uuid) => vec![uuid],
None => vec![],
}
}
};
use self::FindTaskError::*;
match uuids.len() {
0 => Err(TaskNotFound),
1 => self.get_task(uuids[0])
.map_or(Err(FindTaskError::TaskNotFound), Ok),
_ => Err(MultipleResults),
}
}
pub fn is_dirty(&self) -> bool {
self.is_dirty
}
}
#[cfg(test)]
mod tests {
use super::*;
use chrono;
use std::str::FromStr;
use uuid::Uuid;
use {Priority, Task, TaskState};
#[test]
fn test_add_delete_task() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
assert_eq!(m.get_task(&t.uuid), Some(&t));
assert_eq!(m.delete_task(&t.uuid), Some(t.clone()));
assert_eq!(m.get_task(&t.uuid), None);
}
#[test]
fn test_change_task_task() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].status, TaskState::Open);
let s = TaskState::Done(chrono::Utc::now());
m.change_task_state(&uuid, s);
assert_eq!(m.tasks[&uuid].status, s);
}
#[test]
fn test_change_task_priority() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].priority, Priority::Default);
m.change_task_priority(&uuid, Priority::High);
assert_eq!(m.tasks[&uuid].priority, Priority::High);
}
#[test]
fn test_numerical_ref() {
assert_eq!(TaskRef::from_str("42"), Ok(TaskRef::Numerical(42)));
assert_eq!(TaskRef::from_str("0"), Ok(TaskRef::Numerical(0)));
assert!(TaskRef::from_str("-0").is_err());
}
#[test]
fn test_short_uuid_ref() {
for s in vec!["abcdef", "123abc", "000000"] {
assert_eq!(TaskRef::from_str(s), Ok(TaskRef::ShortUUID(s.into())));
}
assert!(
TaskRef::from_str("abcde").is_err(),
"Short-UUID with len of 5"
);
assert!(
TaskRef::from_str("abcdef1").is_err(),
"Short-UUID with len of 7"
);
// Make sure that short-UUIDs are preferred
assert_eq!(
TaskRef::from_str("123456"),
Ok(TaskRef::ShortUUID("123456".into()))
);
// non-base16 symbols
assert!(TaskRef::from_str("rivers").is_err());
}
#[test]
fn test_full_uuid_ref() {
for _ in 1..100 {
let uuid = Uuid::new_v4();
assert_eq!(
TaskRef::from_str(&uuid.hyphenated().to_string()),
Ok(TaskRef::FullUUID(uuid))
);
}
}
#[test]
fn test_incremental_numerical_id_empty_scope() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
        assert_eq!(m.incremental_numerical_id("default", &uuid), 1);
}
#[test]
#[should_panic]
fn test_incremental_numerical_id_unknown_task() {
let mut m = Model::new();
m.incremental_numerical_id("default", &Uuid::new_v4());
}
#[test]
fn test_incremental_numerical_id_already_exists() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
assert_eq!(m.incremental_numerical_id("default", &t.uuid), 1);
}
#[test]
fn test_incremental_numerical_id() {
let mut m = Model::new();
let t = Task::new("foo");
let t2 = Task::new("bar");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
m.add_task(t2.clone());
assert_eq!(m.short_task_id("default", &t.uuid), Some(1));
assert_eq!(m.incremental_numerical_id("default", &t2.uuid), 2);
assert_eq!(m.short_task_id("default", &t2.uuid), Some(2));
}
}
| {
let mut model = Self::new();
for effect in effects {
model.apply_effect(&effect)
}
model.is_dirty = false;
model
} | identifier_body |
model.rs | use std::collections::{BTreeMap, HashMap};
use task::*;
use task_ref::TaskRef;
use std::io;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Effect {
AddTask(Task),
ChangeTaskTags {
uuid: Uuid,
added: Tags,
removed: Tags,
},
ChangeTaskState(Uuid, TaskState),
ChangeTaskPriority(Uuid, Priority),
DeleteTask(Uuid),
// Undo,
}
impl Effect {
fn task_id<'a>(&'a self) -> &'a Uuid {
use Effect::*;
match *self {
AddTask(Task { ref uuid,.. }) => uuid,
ChangeTaskTags { ref uuid,.. } => uuid,
ChangeTaskState(ref u, _) => u,
ChangeTaskPriority(ref u, _) => u,
DeleteTask(ref u) => u,
}
}
pub fn print(&self, model: &Model, out: &mut io::Write) -> io::Result<()> {
use Effect::*;
// Special handling for `DeleteTask` as `task` isn't in
// `model` anymore
if let DeleteTask(_) = self {
writeln!(out, "Deleted task {}", self.task_id())?;
} else {
let task = model.get_task(self.task_id()).unwrap(); // TODO
match self {
DeleteTask(_) => unreachable!(),
AddTask(_) => writeln!(out, "Added Task {}", task.short_id())?,
ChangeTaskTags {
ref added,
ref removed,
..
} => {
                    if !added.is_empty() {
writeln!(out, "Added tags {:?}", added)?;
}
                    if !removed.is_empty() {
writeln!(out, "Removed tags {:?}", removed)?;
}
}
ChangeTaskState(_uuid, ref state) => match *state {
TaskState::Done(_) => writeln!(out, "Marking task '{}' as done", task.description)?,
TaskState::Open => writeln!(out, "Marking task '{}' as open", task.description)?,
TaskState::Canceled(_) => {
writeln!(out, "Marking task '{}' as canceled", task.description)?
}
},
ChangeTaskPriority(_uuid, ref priority) => {
writeln!(
out,
"Changed priority of task '{}' to {}",
task.description, priority
)?;
}
};
}
Ok(())
}
}
pub type ScopeName = String;
pub type NumericalIds = HashMap<ScopeName, BTreeMap<u64, Uuid>>;
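// Numerical ids are short per-scope aliases for display and quick entry;
// the UUID stays the stable identity, and the alias maps are rebuilt by
// `recalculate_numerical_ids` or grown lazily by `incremental_numerical_id`.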
pub struct Model {
// TODO: hide `tasks` and add `archived_tasks`
pub tasks: HashMap<Uuid, Task>,
pub applied_effects: Vec<Effect>,
pub numerical_ids: NumericalIds,
is_dirty: bool,
}
impl Model {
pub fn new() -> Self {
Model {
tasks: HashMap::new(),
applied_effects: Vec::new(),
numerical_ids: NumericalIds::new(),
is_dirty: false,
}
}
pub fn from_effects(effects: &[Effect]) -> Self {
let mut model = Self::new();
for effect in effects {
model.apply_effect(&effect)
}
model.is_dirty = false;
model
}
pub fn apply_effect(&mut self, effect: &Effect) -> () {
use Effect::*;
match effect.clone() {
AddTask(task) => |
ChangeTaskTags {
uuid,
added,
removed,
} => {
self.change_task_tags(&uuid, added, removed);
}
ChangeTaskState(uuid, state) => {
self.change_task_state(&uuid, state);
}
ChangeTaskPriority(uuid, p) => {
self.change_task_priority(&uuid, p);
}
DeleteTask(uuid) => {
self.delete_task(&uuid);
}
}
self.applied_effects.push(effect.clone());
self.is_dirty = true;
}
fn add_task(&mut self, t: Task) -> () {
if self.tasks.insert(t.uuid, t).is_some() {
panic!("UUID collision in Model::add_task");
}
}
fn delete_task(&mut self, u: &Uuid) -> Option<Task> {
self.tasks.remove(&u)
}
fn change_task_state(&mut self, u: &Uuid, state: TaskState) {
self.tasks.get_mut(u).expect("failed to get task").status = state;
}
fn change_task_priority(&mut self, u: &Uuid, priority: Priority) {
self.tasks.get_mut(u).expect("failed to get task").priority = priority;
}
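    // Tag removals are applied before additions, so an effect that removes
    // and adds the same tag leaves the tag present.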
fn change_task_tags(&mut self, u: &Uuid, added: Tags, removed: Tags) {
let ref mut tags = self.tasks.get_mut(u).expect("failed to get task").tags;
for t in removed {
tags.remove(&t);
}
for t in added {
tags.insert(t);
}
}
}
// Numerical-ID Handling
impl Model {
pub fn short_task_id(&self, scope_name: &str, task_id: &Uuid) -> Option<u64> {
self.numerical_ids
.get(scope_name)
.and_then(|ids| ids.iter().find(|&(_, uuid)| uuid == task_id))
.map(|(n, _)| *n)
}
pub fn recalculate_numerical_ids(&mut self, scope: &str, task_ids: &[Uuid]) {
info!("Recalculating numerical-ids for scope {}", scope);
self.is_dirty = true;
let ids = task_ids
.iter()
.enumerate()
.map(|(n, uuid)| ((n as u64) + 1, uuid.clone()))
.collect();
self.numerical_ids.insert(scope.into(), ids);
}
pub fn incremental_numerical_id(&mut self, scope: &str, task: &Uuid) -> u64 {
debug!(
"Calculating incremental numerical-id for {} in scope {}",
task, scope
);
assert!(self.get_task(task).is_some());
self.short_task_id(scope, task).unwrap_or_else(|| {
self.is_dirty = true;
let numerical_ids = self.numerical_ids
.entry(scope.into())
.or_insert(BTreeMap::new());
let n = numerical_ids.iter().map(|(id, _)| *id).max().unwrap_or(0) + 1;
numerical_ids.insert(n, task.clone());
n
})
}
}
#[derive(Debug, PartialEq, Eq, Fail)]
pub enum FindTaskError {
#[fail(display = "Couldn't find task")]
TaskNotFound,
#[fail(display = "Found multiple tasks")]
MultipleResults,
}
pub struct TaskIter<'a> {
tasks: Vec<&'a Task>,
pos: usize,
}
impl<'a> Iterator for TaskIter<'a> {
type Item = &'a Task;
fn next(&mut self) -> Option<Self::Item> {
let v = self.tasks.get(self.pos);
self.pos += 1;
v.map(|x| *x)
}
}
impl Model {
pub fn all_tasks<'a>(&'a self) -> TaskIter<'a> {
let mut v: Vec<&Task> = self.tasks.values().collect();
v.sort_by(|a, b| b.cmp(a));
TaskIter { tasks: v, pos: 0 }
}
pub fn get_task<'a>(&'a self, uuid: &Uuid) -> Option<&'a Task> {
self.tasks.get(uuid)
}
pub fn find_task<'a>(
&'a self,
scope_name: &str,
task_ref: &TaskRef,
) -> Result<&'a Task, FindTaskError> {
let uuids: Vec<&Uuid> = match *task_ref {
TaskRef::FullUUID(ref u) => vec![u],
TaskRef::ShortUUID(ref s) => self.tasks
.keys()
.filter(|uuid| uuid.simple().to_string().starts_with(s))
.collect(),
TaskRef::Numerical(ref n) => {
match self.numerical_ids.get(scope_name).and_then(|x| x.get(n)) {
Some(uuid) => vec![uuid],
None => vec![],
}
}
};
use self::FindTaskError::*;
match uuids.len() {
0 => Err(TaskNotFound),
1 => self.get_task(uuids[0])
.map_or(Err(FindTaskError::TaskNotFound), Ok),
_ => Err(MultipleResults),
}
}
pub fn is_dirty(&self) -> bool {
self.is_dirty
}
}
#[cfg(test)]
mod tests {
use super::*;
use chrono;
use std::str::FromStr;
use uuid::Uuid;
use {Priority, Task, TaskState};
#[test]
fn test_add_delete_task() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
assert_eq!(m.get_task(&t.uuid), Some(&t));
assert_eq!(m.delete_task(&t.uuid), Some(t.clone()));
assert_eq!(m.get_task(&t.uuid), None);
}
#[test]
fn test_change_task_task() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].status, TaskState::Open);
let s = TaskState::Done(chrono::Utc::now());
m.change_task_state(&uuid, s);
assert_eq!(m.tasks[&uuid].status, s);
}
#[test]
fn test_change_task_priority() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].priority, Priority::Default);
m.change_task_priority(&uuid, Priority::High);
assert_eq!(m.tasks[&uuid].priority, Priority::High);
}
#[test]
fn test_numerical_ref() {
assert_eq!(TaskRef::from_str("42"), Ok(TaskRef::Numerical(42)));
assert_eq!(TaskRef::from_str("0"), Ok(TaskRef::Numerical(0)));
assert!(TaskRef::from_str("-0").is_err());
}
#[test]
fn test_short_uuid_ref() {
for s in vec!["abcdef", "123abc", "000000"] {
assert_eq!(TaskRef::from_str(s), Ok(TaskRef::ShortUUID(s.into())));
}
assert!(
TaskRef::from_str("abcde").is_err(),
"Short-UUID with len of 5"
);
assert!(
TaskRef::from_str("abcdef1").is_err(),
"Short-UUID with len of 7"
);
// Make sure that short-UUIDs are preferred
assert_eq!(
TaskRef::from_str("123456"),
Ok(TaskRef::ShortUUID("123456".into()))
);
// non-base16 symbols
assert!(TaskRef::from_str("rivers").is_err());
}
#[test]
fn test_full_uuid_ref() {
for _ in 1..100 {
let uuid = Uuid::new_v4();
assert_eq!(
TaskRef::from_str(&uuid.hyphenated().to_string()),
Ok(TaskRef::FullUUID(uuid))
);
}
}
#[test]
fn test_incremental_numerical_id_empty_scope() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
        assert_eq!(m.incremental_numerical_id("default", &uuid), 1);
}
#[test]
#[should_panic]
fn test_incremental_numerical_id_unknown_task() {
let mut m = Model::new();
m.incremental_numerical_id("default", &Uuid::new_v4());
}
#[test]
fn test_incremental_numerical_id_already_exists() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
assert_eq!(m.incremental_numerical_id("default", &t.uuid), 1);
}
#[test]
fn test_incremental_numerical_id() {
let mut m = Model::new();
let t = Task::new("foo");
let t2 = Task::new("bar");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
m.add_task(t2.clone());
assert_eq!(m.short_task_id("default", &t.uuid), Some(1));
assert_eq!(m.incremental_numerical_id("default", &t2.uuid), 2);
assert_eq!(m.short_task_id("default", &t2.uuid), Some(2));
}
}
| {
self.add_task(task);
} | conditional_block |
model.rs | use std::collections::{BTreeMap, HashMap};
use task::*;
use task_ref::TaskRef;
use std::io;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Effect {
AddTask(Task),
ChangeTaskTags {
uuid: Uuid,
added: Tags,
removed: Tags,
},
ChangeTaskState(Uuid, TaskState),
ChangeTaskPriority(Uuid, Priority),
DeleteTask(Uuid),
// Undo,
}
impl Effect {
fn task_id<'a>(&'a self) -> &'a Uuid {
use Effect::*;
match *self {
AddTask(Task { ref uuid, .. }) => uuid,
ChangeTaskTags { ref uuid, .. } => uuid,
ChangeTaskState(ref u, _) => u,
ChangeTaskPriority(ref u, _) => u,
DeleteTask(ref u) => u,
}
}
pub fn print(&self, model: &Model, out: &mut io::Write) -> io::Result<()> {
use Effect::*;
// Special handling for `DeleteTask` as `task` isn't in
// `model` anymore
if let DeleteTask(_) = self {
writeln!(out, "Deleted task {}", self.task_id())?;
} else {
let task = model.get_task(self.task_id()).unwrap(); // TODO
match self {
DeleteTask(_) => unreachable!(),
AddTask(_) => writeln!(out, "Added Task {}", task.short_id())?,
ChangeTaskTags {
ref added,
ref removed,
..
} => {
if !added.is_empty() {
writeln!(out, "Added tags {:?}", added)?;
}
if !removed.is_empty() {
writeln!(out, "Removed tags {:?}", removed)?;
}
}
ChangeTaskState(_uuid, ref state) => match *state {
TaskState::Done(_) => writeln!(out, "Marking task '{}' as done", task.description)?,
TaskState::Open => writeln!(out, "Marking task '{}' as open", task.description)?,
TaskState::Canceled(_) => {
writeln!(out, "Marking task '{}' as canceled", task.description)?
}
},
ChangeTaskPriority(_uuid, ref priority) => {
writeln!(
out,
"Changed priority of task '{}' to {}",
task.description, priority
)?;
}
};
}
Ok(())
}
}
pub type ScopeName = String;
pub type NumericalIds = HashMap<ScopeName, BTreeMap<u64, Uuid>>;
pub struct Model {
// TODO: hide `tasks` and add `archived_tasks`
pub tasks: HashMap<Uuid, Task>,
pub applied_effects: Vec<Effect>,
pub numerical_ids: NumericalIds,
is_dirty: bool,
}
impl Model {
pub fn new() -> Self {
Model {
tasks: HashMap::new(),
applied_effects: Vec::new(),
numerical_ids: NumericalIds::new(),
is_dirty: false,
}
}
pub fn from_effects(effects: &[Effect]) -> Self {
let mut model = Self::new();
for effect in effects {
model.apply_effect(&effect)
}
model.is_dirty = false;
model
}
pub fn apply_effect(&mut self, effect: &Effect) -> () {
use Effect::*;
match effect.clone() {
AddTask(task) => {
self.add_task(task);
}
ChangeTaskTags {
uuid,
added,
removed,
} => {
self.change_task_tags(&uuid, added, removed);
}
ChangeTaskState(uuid, state) => {
self.change_task_state(&uuid, state);
}
ChangeTaskPriority(uuid, p) => {
self.change_task_priority(&uuid, p);
}
DeleteTask(uuid) => {
self.delete_task(&uuid);
}
}
self.applied_effects.push(effect.clone());
self.is_dirty = true;
}
fn add_task(&mut self, t: Task) -> () {
if self.tasks.insert(t.uuid, t).is_some() {
panic!("UUID collision in Model::add_task");
}
}
fn delete_task(&mut self, u: &Uuid) -> Option<Task> {
self.tasks.remove(&u)
}
fn change_task_state(&mut self, u: &Uuid, state: TaskState) {
self.tasks.get_mut(u).expect("failed to get task").status = state;
}
fn change_task_priority(&mut self, u: &Uuid, priority: Priority) {
self.tasks.get_mut(u).expect("failed to get task").priority = priority;
}
fn change_task_tags(&mut self, u: &Uuid, added: Tags, removed: Tags) {
let ref mut tags = self.tasks.get_mut(u).expect("failed to get task").tags;
for t in removed {
tags.remove(&t);
}
for t in added {
tags.insert(t);
}
}
}
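// Editorial sketch, not part of the original file: `applied_effects` is an
// event log, so an equivalent model can be rebuilt by replaying it through
// `Model::from_effects`. Only items defined in this module are used.
#[cfg(test)]
#[test]
fn sketch_replay_effect_log() {
let t = Task::new("foo");
let mut m = Model::new();
m.apply_effect(&Effect::AddTask(t.clone()));
let replayed = Model::from_effects(&m.applied_effects);
assert_eq!(replayed.get_task(&t.uuid), Some(&t));
assert!(!replayed.is_dirty()); // `from_effects` resets the dirty flag after replay
}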
// Numerical-ID Handling
impl Model {
pub fn short_task_id(&self, scope_name: &str, task_id: &Uuid) -> Option<u64> {
self.numerical_ids
.get(scope_name)
.and_then(|ids| ids.iter().find(|&(_, uuid)| uuid == task_id))
.map(|(n, _)| *n)
}
pub fn recalculate_numerical_ids(&mut self, scope: &str, task_ids: &[Uuid]) {
info!("Recalculating numerical-ids for scope {}", scope);
self.is_dirty = true;
let ids = task_ids
.iter()
.enumerate()
.map(|(n, uuid)| ((n as u64) + 1, uuid.clone()))
.collect();
self.numerical_ids.insert(scope.into(), ids);
}
pub fn incremental_numerical_id(&mut self, scope: &str, task: &Uuid) -> u64 {
debug!(
"Calculating incremental numerical-id for {} in scope {}",
task, scope
);
assert!(self.get_task(task).is_some());
self.short_task_id(scope, task).unwrap_or_else(|| {
self.is_dirty = true;
let numerical_ids = self.numerical_ids
.entry(scope.into())
.or_insert(BTreeMap::new());
let n = numerical_ids.iter().map(|(id, _)| *id).max().unwrap_or(0) + 1;
numerical_ids.insert(n, task.clone());
n
})
}
}
#[derive(Debug, PartialEq, Eq, Fail)]
pub enum FindTaskError {
#[fail(display = "Couldn't find task")]
TaskNotFound,
#[fail(display = "Found multiple tasks")]
MultipleResults,
}
pub struct TaskIter<'a> {
tasks: Vec<&'a Task>,
pos: usize,
}
impl<'a> Iterator for TaskIter<'a> {
type Item = &'a Task;
fn next(&mut self) -> Option<Self::Item> {
let v = self.tasks.get(self.pos);
self.pos += 1;
v.map(|x| *x)
}
}
impl Model {
pub fn all_tasks<'a>(&'a self) -> TaskIter<'a> {
let mut v: Vec<&Task> = self.tasks.values().collect();
v.sort_by(|a, b| b.cmp(a));
TaskIter { tasks: v, pos: 0 }
}
pub fn get_task<'a>(&'a self, uuid: &Uuid) -> Option<&'a Task> {
self.tasks.get(uuid)
}
pub fn find_task<'a>(
&'a self,
scope_name: &str,
task_ref: &TaskRef,
) -> Result<&'a Task, FindTaskError> {
let uuids: Vec<&Uuid> = match *task_ref {
TaskRef::FullUUID(ref u) => vec![u],
TaskRef::ShortUUID(ref s) => self.tasks
.keys()
.filter(|uuid| uuid.simple().to_string().starts_with(s))
.collect(),
TaskRef::Numerical(ref n) => {
match self.numerical_ids.get(scope_name).and_then(|x| x.get(n)) {
Some(uuid) => vec![uuid],
None => vec![],
}
}
};
use self::FindTaskError::*;
match uuids.len() {
0 => Err(TaskNotFound),
1 => self.get_task(uuids[0])
.map_or(Err(FindTaskError::TaskNotFound), Ok),
_ => Err(MultipleResults),
}
}
pub fn | (&self) -> bool {
self.is_dirty
}
}
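// Editorial sketch, not part of the original file: resolving a task through
// `find_task`. Scope names only matter for `TaskRef::Numerical`; short UUIDs
// are matched as prefixes of the simple (non-hyphenated) UUID form.
#[cfg(test)]
#[test]
fn sketch_find_task_by_ref() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
let found = m.find_task("default", &TaskRef::FullUUID(t.uuid)).unwrap();
assert_eq!(found.uuid, t.uuid);
// no numerical ids have been assigned in this scope yet
assert_eq!(m.find_task("default", &TaskRef::Numerical(1)), Err(FindTaskError::TaskNotFound));
}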
#[cfg(test)]
mod tests {
use super::*;
use chrono;
use std::str::FromStr;
use uuid::Uuid;
use {Priority, Task, TaskState};
#[test]
fn test_add_delete_task() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
assert_eq!(m.get_task(&t.uuid), Some(&t));
assert_eq!(m.delete_task(&t.uuid), Some(t.clone()));
assert_eq!(m.get_task(&t.uuid), None);
}
#[test]
fn test_change_task_task() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].status, TaskState::Open);
let s = TaskState::Done(chrono::Utc::now());
m.change_task_state(&uuid, s);
assert_eq!(m.tasks[&uuid].status, s);
}
#[test]
fn test_change_task_priority() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.tasks[&uuid].priority, Priority::Default);
m.change_task_priority(&uuid, Priority::High);
assert_eq!(m.tasks[&uuid].priority, Priority::High);
}
#[test]
fn test_numerical_ref() {
assert_eq!(TaskRef::from_str("42"), Ok(TaskRef::Numerical(42)));
assert_eq!(TaskRef::from_str("0"), Ok(TaskRef::Numerical(0)));
assert!(TaskRef::from_str("-0").is_err());
}
#[test]
fn test_short_uuid_ref() {
for s in vec!["abcdef", "123abc", "000000"] {
assert_eq!(TaskRef::from_str(s), Ok(TaskRef::ShortUUID(s.into())));
}
assert!(
TaskRef::from_str("abcde").is_err(),
"Short-UUID with len of 5"
);
assert!(
TaskRef::from_str("abcdef1").is_err(),
"Short-UUID with len of 7"
);
// Make sure that short-UUIDs are preferred
assert_eq!(
TaskRef::from_str("123456"),
Ok(TaskRef::ShortUUID("123456".into()))
);
// non-base16 symbols
assert!(TaskRef::from_str("rivers").is_err());
}
#[test]
fn test_full_uuid_ref() {
for _ in 1..100 {
let uuid = Uuid::new_v4();
assert_eq!(
TaskRef::from_str(&uuid.hyphenated().to_string()),
Ok(TaskRef::FullUUID(uuid))
);
}
}
#[test]
fn test_incremental_numerical_id_empty_scope() {
let mut m = Model::new();
let t = Task::new("foo");
let uuid = t.uuid.clone();
m.add_task(t.clone());
assert_eq!(m.incremental_numerical_id("defaut", &uuid), 1);
}
#[test]
#[should_panic]
fn test_incremental_numerical_id_unknown_task() {
let mut m = Model::new();
m.incremental_numerical_id("default", &Uuid::new_v4());
}
#[test]
fn test_incremental_numerical_id_already_exists() {
let mut m = Model::new();
let t = Task::new("foo");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
assert_eq!(m.incremental_numerical_id("default", &t.uuid), 1);
}
#[test]
fn test_incremental_numerical_id() {
let mut m = Model::new();
let t = Task::new("foo");
let t2 = Task::new("bar");
m.add_task(t.clone());
m.recalculate_numerical_ids("default", &vec![t.uuid]);
m.add_task(t2.clone());
assert_eq!(m.short_task_id("default", &t.uuid), Some(1));
assert_eq!(m.incremental_numerical_id("default", &t2.uuid), 2);
assert_eq!(m.short_task_id("default", &t2.uuid), Some(2));
}
}
| is_dirty | identifier_name |
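// Editorial note, not part of the original file: `is_dirty` tracks unpersisted
// state. Every `apply_effect` and numerical-id recalculation sets it, while
// `Model::new` and `Model::from_effects` leave it false, so freshly replayed
// models do not trigger an immediate save.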
lib.rs | #![warn(rust_2018_idioms)]
#![cfg_attr(feature = "strict", deny(warnings))]
use std::num::NonZeroUsize;
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::{
parse_macro_input, Error, Expr, ExprLit, ExprRange, FnArg, Index, ItemFn, Lit, LitStr, Pat,
RangeLimits, ReturnType, Token, Type,
};
/// Creates an inline hook at a C# method.
///
/// # Panics
///
/// * `install` will panic if the class or method was not found.
/// * `original` will panic if the hook has not yet been installed.
///
/// # Examples
///
/// ```no_run
/// use quest_hook::inline_hook::hook;
/// use quest_hook::libil2cpp::Il2CppObject;
/// use log::info;
///
/// #[hook("", "MainSettingsModelSO", "OnEnable")]
/// fn on_enable(this: &Il2CppObject) {
/// info!("MainSettingsModelSO.OnEnable was called!");
///
/// on_enable.original(this); // Call the original C# method
/// }
///
/// #[no_mangle]
/// pub extern "C" fn load() {
/// info!("Installing hooks!");
///
/// on_enable.install(); // Install the hook
///
/// info!("Installed hooks!");
/// }
/// ```
#[proc_macro_attribute]
pub fn | (attr: TokenStream, item: TokenStream) -> TokenStream {
let punctuated_args =
parse_macro_input!(attr with Punctuated<LitStr, Token![,]>::parse_separated_nonempty);
let input = parse_macro_input!(item as ItemFn);
match create_hook(punctuated_args, input) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
}
fn create_hook(
punctuated_args: Punctuated<LitStr, Token![,]>,
input: ItemFn,
) -> Result<TokenStream, Error> {
let args: Vec<String> = punctuated_args.iter().map(LitStr::value).collect();
let (namespace, class, method) = match args.as_slice() {
[n, c, m] => (n, c, m),
_ => {
let msg = format!("Expected 3 arguments, found {}", args.len());
return Err(Error::new_spanned(punctuated_args, msg));
}
};
let ItemFn { sig, block, .. } = input;
let name = sig.ident;
let return_type = sig.output;
let typecheck_return_type = match &return_type {
ReturnType::Default => quote! { () },
ReturnType::Type(_, ty) => quote! { #ty },
};
let hook_name = format_ident!("{}_hook", name);
let hook_args = sig.inputs;
let mut this_arg_type = None;
let mut num_hook_args: usize = 0;
for hook_arg in &hook_args {
let arg_type = match hook_arg {
FnArg::Typed(arg_type) => arg_type,
FnArg::Receiver(_) => {
let msg = "Hook argument cannot be `self`";
return Err(Error::new_spanned(hook_arg, msg));
}
};
match &*arg_type.pat {
// `il2cpp_class_get_method_from_name` does not count `this` in its argument count
Pat::Ident(pat_ident) if pat_ident.ident == "this" => {
if this_arg_type.is_some() {
let msg = "There cannot be more than one `this` argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
if num_hook_args > 0 {
let msg = "`this` must be the first argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
this_arg_type = Some(arg_type.ty.clone());
}
_ => num_hook_args += 1,
}
}
let hook_struct_name = format_ident!("{}_Struct", name);
let mut hook_args_untyped: Punctuated<Pat, Token![,]> = Punctuated::new();
let mut typecheck_arg_types: Punctuated<Type, Token![,]> = Punctuated::new();
for arg in &hook_args {
if let FnArg::Typed(arg) = arg {
hook_args_untyped.push((*arg.pat).clone());
match &*arg.pat {
Pat::Ident(pat_ident) if pat_ident.ident == "this" => continue,
_ => typecheck_arg_types.push((*arg.ty).clone()),
}
}
}
let typecheck_this_type = match &this_arg_type {
None => quote! { () },
Some(ty) => quote! { #ty },
};
let tokens = quote! {
pub extern "C" fn #hook_name ( #hook_args ) #return_type #block
#[allow(non_camel_case_types)]
struct #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr<()>,
namespace: &'static str,
class_name: &'static str,
method_name: &'static str,
parameters_count: usize,
}
impl #hook_struct_name {
fn install(&self) {
use ::quest_hook::libil2cpp::WrapRaw;
let class = ::quest_hook::libil2cpp::Il2CppClass::find(self.namespace, self.class_name).expect("Class not found");
let method = class.find_method_callee::<
#typecheck_this_type,
( #typecheck_arg_types ),
#typecheck_return_type,
#num_hook_args
>(self.method_name).expect("Method not found");
let mut temp = ::std::ptr::null_mut();
unsafe {
::quest_hook::inline_hook::A64HookFunction(
::std::mem::transmute::<unsafe extern "C" fn(), *mut ::std::ffi::c_void>(method.raw().methodPointer.unwrap()),
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ::std::ffi::c_void>( #hook_name ),
&mut temp,
);
self.original.store(
::std::mem::transmute::<*mut ::std::ffi::c_void, *mut ()>(temp),
::std::sync::atomic::Ordering::Relaxed
);
}
}
fn original(&self, #hook_args ) #return_type {
let ptr = self.original.load(::std::sync::atomic::Ordering::Relaxed);
let original = unsafe {
::std::mem::transmute::<*const (), Option<extern "C" fn( #hook_args ) #return_type >>(ptr)
};
(original.expect("Hook is not installed"))( #hook_args_untyped )
}
fn hook(&self, #hook_args ) #return_type {
#hook_name( #hook_args_untyped )
}
}
impl ::quest_hook::Hook for #hook_struct_name {
fn install(&self) {
self.install()
}
fn namespace(&self) -> &'static str {
self.namespace
}
fn class_name(&self) -> &'static str {
self.class_name
}
fn method_name(&self) -> &'static str {
self.method_name
}
fn parameters_count(&self) -> usize {
self.parameters_count
}
fn hook(&self) -> *mut () {
unsafe {
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ()>( #hook_name )
}
}
fn original(&self) -> *mut () {
self.original.load(::std::sync::atomic::Ordering::Relaxed)
}
}
#[allow(non_upper_case_globals)]
static #name: #hook_struct_name = #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr::new(::std::ptr::null_mut()),
namespace: #namespace,
class_name: #class,
method_name: #method,
parameters_count: #num_hook_args as usize
};
};
Ok(tokens.into())
}
#[doc(hidden)]
#[proc_macro]
pub fn impl_arguments_parameters(input: TokenStream) -> TokenStream {
let range = parse_macro_input!(input as ExprRange);
match create_impl_arguments_parameters(range) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
}
fn create_impl_arguments_parameters(range: ExprRange) -> Result<TokenStream, Error> {
let span = range.span();
let start = range
.from
.ok_or_else(|| Error::new(span, "Tuple length range must have a lower bound"))?;
let start = parse_range_bound(*start)?;
let end = range
.to
.ok_or_else(|| Error::new(span, "Tuple length range must have an upper bound"))?;
let end = parse_range_bound(*end)?;
let range = match range.limits {
RangeLimits::HalfOpen(_) if end <= start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::HalfOpen(_) => start..end,
RangeLimits::Closed(_) if end < start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::Closed(_) => start..(end + 1),
};
let mut ts = TokenStream::new();
for n in range {
let generic_params_argument = (1..=n).map(|n| format_ident!("A{}", n));
let matches_argument = generic_params_argument
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(args[#n].ty())));
let invokables = (0..n).map(Index::from).map(|n| quote!(self.#n.invokable()));
let generic_params_parameter = (1..=n).map(|n| format_ident!("P{}", n));
let matches_parameter = generic_params_parameter
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(params[#n].ty())));
let generic_params_argument_tuple = generic_params_argument.clone();
let generic_params_argument_where = generic_params_argument.clone();
let generic_params_argument_type = generic_params_argument.clone();
let generic_params_parameter_tuple = generic_params_parameter.clone();
let generic_params_parameter_where = generic_params_parameter.clone();
let generic_params_parameter_type = generic_params_parameter.clone();
let impl_ts = quote! {
unsafe impl<#(#generic_params_argument),*> Arguments<#n> for (#(#generic_params_argument_tuple,)*)
where
#(#generic_params_argument_where: Argument),*
{
type Type = (#(#generic_params_argument_type::Type,)*);
fn matches(args: &[&ParameterInfo]) -> bool {
args.len() == #n #( && #matches_argument)*
}
fn invokable(&self) -> [*mut c_void; #n] {
[#(#invokables),*]
}
}
unsafe impl<#(#generic_params_parameter),*> Parameters<#n> for (#(#generic_params_parameter_tuple,)*)
where
#(#generic_params_parameter_where: Parameter),*
{
type Type = (#(#generic_params_parameter_type::Type,)*);
fn matches(params: &[&ParameterInfo]) -> bool {
params.len() == #n #( && #matches_parameter)*
}
}
};
ts.extend(TokenStream::from(impl_ts));
}
Ok(ts)
}
fn parse_range_bound(bound: Expr) -> Result<usize, Error> {
let bound: NonZeroUsize = match bound {
syn::Expr::Lit(ExprLit {
lit: Lit::Int(n), ..
}) => n.base10_parse()?,
_ => {
return Err(Error::new(
bound.span(),
"Tuple length bound must be an integer",
))
}
};
Ok(bound.get())
}
| hook | identifier_name |
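// Editorial note, not part of the original file: `parameters_count` in the
// generated struct deliberately excludes a leading `this` argument, mirroring
// how `il2cpp_class_get_method_from_name` counts parameters. A hypothetical
// hook like the following would therefore report parameters_count == 1:
//
// #[hook("", "SomeClass", "SomeMethod")]
// fn my_hook(this: &Il2CppObject, value: i32) { /* ... */ }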
lib.rs | #![warn(rust_2018_idioms)]
#![cfg_attr(feature = "strict", deny(warnings))]
use std::num::NonZeroUsize;
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::{
parse_macro_input, Error, Expr, ExprLit, ExprRange, FnArg, Index, ItemFn, Lit, LitStr, Pat,
RangeLimits, ReturnType, Token, Type,
};
/// Creates an inline hook at a C# method.
///
/// # Panics
///
/// * `install` will panic if the class or method was not found.
/// * `original` will panic if the hook has not yet been installed.
///
/// # Examples
///
/// ```no_run
/// use quest_hook::inline_hook::hook;
/// use quest_hook::libil2cpp::Il2CppObject;
/// use log::info;
///
/// #[hook("", "MainSettingsModelSO", "OnEnable")]
/// fn on_enable(this: &Il2CppObject) {
/// info!("MainSettingsModelSO.OnEnable was called!");
///
/// on_enable.original(this); // Call the original C# method
/// }
///
/// #[no_mangle]
/// pub extern "C" fn load() {
/// info!("Installing hooks!");
///
/// on_enable.install(); // Install the hook
///
/// info!("Installed hooks!");
/// }
/// ```
#[proc_macro_attribute]
pub fn hook(attr: TokenStream, item: TokenStream) -> TokenStream {
let punctuated_args =
parse_macro_input!(attr with Punctuated<LitStr, Token![,]>::parse_separated_nonempty);
let input = parse_macro_input!(item as ItemFn);
match create_hook(punctuated_args, input) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
}
fn create_hook(
punctuated_args: Punctuated<LitStr, Token![,]>,
input: ItemFn,
) -> Result<TokenStream, Error> {
let args: Vec<String> = punctuated_args.iter().map(LitStr::value).collect();
let (namespace, class, method) = match args.as_slice() {
[n, c, m] => (n, c, m),
_ => {
let msg = format!("Expected 3 arguments, found {}", args.len());
return Err(Error::new_spanned(punctuated_args, msg));
}
};
let ItemFn { sig, block, .. } = input;
let name = sig.ident;
let return_type = sig.output;
let typecheck_return_type = match &return_type {
ReturnType::Default => quote! { () },
ReturnType::Type(_, ty) => quote! { #ty },
};
let hook_name = format_ident!("{}_hook", name);
let hook_args = sig.inputs;
let mut this_arg_type = None;
let mut num_hook_args: usize = 0;
for hook_arg in &hook_args {
let arg_type = match hook_arg {
FnArg::Typed(arg_type) => arg_type,
FnArg::Receiver(_) => {
let msg = "Hook argument cannot be `self`";
return Err(Error::new_spanned(hook_arg, msg));
}
};
match &*arg_type.pat {
// `il2cpp_class_get_method_from_name` does not count `this` in its argument count
Pat::Ident(pat_ident) if pat_ident.ident == "this" => {
if this_arg_type.is_some() {
let msg = "There cannot be more than one `this` argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
if num_hook_args > 0 {
let msg = "`this` must be the first argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
this_arg_type = Some(arg_type.ty.clone());
}
_ => num_hook_args += 1,
}
}
let hook_struct_name = format_ident!("{}_Struct", name);
let mut hook_args_untyped: Punctuated<Pat, Token![,]> = Punctuated::new();
let mut typecheck_arg_types: Punctuated<Type, Token![,]> = Punctuated::new();
for arg in &hook_args {
if let FnArg::Typed(arg) = arg {
hook_args_untyped.push((*arg.pat).clone());
match &*arg.pat {
Pat::Ident(pat_ident) if pat_ident.ident == "this" => continue,
_ => typecheck_arg_types.push((*arg.ty).clone()),
}
}
}
let typecheck_this_type = match &this_arg_type {
None => quote! { () },
Some(ty) => quote! { #ty },
};
let tokens = quote! {
pub extern "C" fn #hook_name ( #hook_args ) #return_type #block
#[allow(non_camel_case_types)]
struct #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr<()>,
namespace: &'static str,
class_name: &'static str,
method_name: &'static str,
parameters_count: usize,
}
impl #hook_struct_name {
fn install(&self) {
use ::quest_hook::libil2cpp::WrapRaw;
let class = ::quest_hook::libil2cpp::Il2CppClass::find(self.namespace, self.class_name).expect("Class not found");
let method = class.find_method_callee::<
#typecheck_this_type,
( #typecheck_arg_types ),
#typecheck_return_type,
#num_hook_args
>(self.method_name).expect("Method not found");
let mut temp = ::std::ptr::null_mut();
unsafe {
::quest_hook::inline_hook::A64HookFunction(
::std::mem::transmute::<unsafe extern "C" fn(), *mut ::std::ffi::c_void>(method.raw().methodPointer.unwrap()),
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ::std::ffi::c_void>( #hook_name ),
&mut temp,
);
self.original.store(
::std::mem::transmute::<*mut ::std::ffi::c_void, *mut ()>(temp),
::std::sync::atomic::Ordering::Relaxed
);
}
}
fn original(&self, #hook_args ) #return_type {
let ptr = self.original.load(::std::sync::atomic::Ordering::Relaxed);
let original = unsafe {
::std::mem::transmute::<*const (), Option<extern "C" fn( #hook_args ) #return_type >>(ptr)
};
(original.expect("Hook is not installed"))( #hook_args_untyped )
}
fn hook(&self, #hook_args ) #return_type {
#hook_name( #hook_args_untyped )
}
}
impl ::quest_hook::Hook for #hook_struct_name {
fn install(&self) {
self.install()
}
fn namespace(&self) -> &'static str {
self.namespace
}
fn class_name(&self) -> &'static str {
self.class_name
}
fn method_name(&self) -> &'static str {
self.method_name
}
fn parameters_count(&self) -> usize {
self.parameters_count
}
fn hook(&self) -> *mut () {
unsafe {
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ()>( #hook_name )
}
}
fn original(&self) -> *mut () {
self.original.load(::std::sync::atomic::Ordering::Relaxed)
}
}
#[allow(non_upper_case_globals)]
static #name: #hook_struct_name = #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr::new(::std::ptr::null_mut()),
namespace: #namespace,
class_name: #class,
method_name: #method,
parameters_count: #num_hook_args as usize
};
};
Ok(tokens.into())
}
#[doc(hidden)]
#[proc_macro]
pub fn impl_arguments_parameters(input: TokenStream) -> TokenStream |
fn create_impl_arguments_parameters(range: ExprRange) -> Result<TokenStream, Error> {
let span = range.span();
let start = range
.from
.ok_or_else(|| Error::new(span, "Tuple length range must have a lower bound"))?;
let start = parse_range_bound(*start)?;
let end = range
.to
.ok_or_else(|| Error::new(span, "Tuple length range must have an upper bound"))?;
let end = parse_range_bound(*end)?;
let range = match range.limits {
RangeLimits::HalfOpen(_) if end <= start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::HalfOpen(_) => start..end,
RangeLimits::Closed(_) if end < start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::Closed(_) => start..(end + 1),
};
let mut ts = TokenStream::new();
for n in range {
let generic_params_argument = (1..=n).map(|n| format_ident!("A{}", n));
let matches_argument = generic_params_argument
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(args[#n].ty())));
let invokables = (0..n).map(Index::from).map(|n| quote!(self.#n.invokable()));
let generic_params_parameter = (1..=n).map(|n| format_ident!("P{}", n));
let matches_parameter = generic_params_parameter
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(params[#n].ty())));
let generic_params_argument_tuple = generic_params_argument.clone();
let generic_params_argument_where = generic_params_argument.clone();
let generic_params_argument_type = generic_params_argument.clone();
let generic_params_parameter_tuple = generic_params_parameter.clone();
let generic_params_parameter_where = generic_params_parameter.clone();
let generic_params_parameter_type = generic_params_parameter.clone();
let impl_ts = quote! {
unsafe impl<#(#generic_params_argument),*> Arguments<#n> for (#(#generic_params_argument_tuple,)*)
where
#(#generic_params_argument_where: Argument),*
{
type Type = (#(#generic_params_argument_type::Type,)*);
fn matches(args: &[&ParameterInfo]) -> bool {
args.len() == #n #( && #matches_argument)*
}
fn invokable(&self) -> [*mut c_void; #n] {
[#(#invokables),*]
}
}
unsafe impl<#(#generic_params_parameter),*> Parameters<#n> for (#(#generic_params_parameter_tuple,)*)
where
#(#generic_params_parameter_where: Parameter),*
{
type Type = (#(#generic_params_parameter_type::Type,)*);
fn matches(params: &[&ParameterInfo]) -> bool {
params.len() == #n #( && #matches_parameter)*
}
}
};
ts.extend(TokenStream::from(impl_ts));
}
Ok(ts)
}
fn parse_range_bound(bound: Expr) -> Result<usize, Error> {
let bound: NonZeroUsize = match bound {
syn::Expr::Lit(ExprLit {
lit: Lit::Int(n), ..
}) => n.base10_parse()?,
_ => {
return Err(Error::new(
bound.span(),
"Tuple length bound must be an integer",
))
}
};
Ok(bound.get())
}
| {
let range = parse_macro_input!(input as ExprRange);
match create_impl_arguments_parameters(range) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
} | identifier_body |
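// Editorial sketch, not part of the original file: an invocation such as
// `impl_arguments_parameters!(1..=2)` expands (roughly) to one pair of impls
// per tuple length, e.g. for length 2:
//
// unsafe impl<A1, A2> Arguments<2> for (A1, A2)
// where A1: Argument, A2: Argument {
//     type Type = (A1::Type, A2::Type);
//     fn matches(args: &[&ParameterInfo]) -> bool {
//         args.len() == 2 && <A1>::matches(args[0].ty()) && <A2>::matches(args[1].ty())
//     }
//     fn invokable(&self) -> [*mut c_void; 2] {
//         [self.0.invokable(), self.1.invokable()]
//     }
// }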
lib.rs | #![warn(rust_2018_idioms)]
#![cfg_attr(feature = "strict", deny(warnings))]
use std::num::NonZeroUsize;
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::{
parse_macro_input, Error, Expr, ExprLit, ExprRange, FnArg, Index, ItemFn, Lit, LitStr, Pat,
RangeLimits, ReturnType, Token, Type,
};
/// Creates an inline hook at a C# method.
///
/// # Panics
///
/// * `install` will panic if the class or method was not found.
/// * `original` will panic if the hook has not yet been installed.
///
/// # Examples
///
/// ```no_run
/// use quest_hook::inline_hook::hook;
/// use quest_hook::libil2cpp::Il2CppObject;
/// use log::info;
///
/// #[hook("", "MainSettingsModelSO", "OnEnable")]
/// fn on_enable(this: &Il2CppObject) {
/// info!("MainSettingsModelSO.OnEnable was called!");
///
/// on_enable.original(this); // Call the original C# method
/// }
///
/// #[no_mangle]
/// pub extern "C" fn load() {
/// info!("Installing hooks!");
///
/// on_enable.install(); // Install the hook
///
/// info!("Installed hooks!");
/// }
/// ```
#[proc_macro_attribute]
pub fn hook(attr: TokenStream, item: TokenStream) -> TokenStream {
let punctuated_args =
parse_macro_input!(attr with Punctuated<LitStr, Token![,]>::parse_separated_nonempty);
let input = parse_macro_input!(item as ItemFn);
match create_hook(punctuated_args, input) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
}
fn create_hook(
punctuated_args: Punctuated<LitStr, Token![,]>,
input: ItemFn,
) -> Result<TokenStream, Error> {
let args: Vec<String> = punctuated_args.iter().map(LitStr::value).collect();
let (namespace, class, method) = match args.as_slice() {
[n, c, m] => (n, c, m),
_ => {
let msg = format!("Expected 3 arguments, found {}", args.len());
return Err(Error::new_spanned(punctuated_args, msg));
}
};
let ItemFn { sig, block,.. } = input; | let name = sig.ident;
let return_type = sig.output;
let typecheck_return_type = match &return_type {
ReturnType::Default => quote! { () },
ReturnType::Type(_, ty) => quote! { #ty },
};
let hook_name = format_ident!("{}_hook", name);
let hook_args = sig.inputs;
let mut this_arg_type = None;
let mut num_hook_args: usize = 0;
for hook_arg in &hook_args {
let arg_type = match hook_arg {
FnArg::Typed(arg_type) => arg_type,
FnArg::Receiver(_) => {
let msg = "Hook argument cannot be `self`";
return Err(Error::new_spanned(hook_arg, msg));
}
};
match &*arg_type.pat {
// `il2cpp_class_get_method_from_name` does not count `this` in its argument count
Pat::Ident(pat_ident) if pat_ident.ident == "this" => {
if this_arg_type.is_some() {
let msg = "There cannot be more than one `this` argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
if num_hook_args > 0 {
let msg = "`this` must be the first argument.";
return Err(Error::new_spanned(hook_arg, msg));
}
this_arg_type = Some(arg_type.ty.clone());
}
_ => num_hook_args += 1,
}
}
let hook_struct_name = format_ident!("{}_Struct", name);
let mut hook_args_untyped: Punctuated<Pat, Token![,]> = Punctuated::new();
let mut typecheck_arg_types: Punctuated<Type, Token![,]> = Punctuated::new();
for arg in &hook_args {
if let FnArg::Typed(arg) = arg {
hook_args_untyped.push((*arg.pat).clone());
match &*arg.pat {
Pat::Ident(pat_ident) if pat_ident.ident == "this" => continue,
_ => typecheck_arg_types.push((*arg.ty).clone()),
}
}
}
let typecheck_this_type = match &this_arg_type {
None => quote! { () },
Some(ty) => quote! { #ty },
};
let tokens = quote! {
pub extern "C" fn #hook_name ( #hook_args ) #return_type #block
#[allow(non_camel_case_types)]
struct #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr<()>,
namespace: &'static str,
class_name: &'static str,
method_name: &'static str,
parameters_count: usize,
}
impl #hook_struct_name {
fn install(&self) {
use ::quest_hook::libil2cpp::WrapRaw;
let class = ::quest_hook::libil2cpp::Il2CppClass::find(self.namespace, self.class_name).expect("Class not found");
let method = class.find_method_callee::<
#typecheck_this_type,
( #typecheck_arg_types ),
#typecheck_return_type,
#num_hook_args
>(self.method_name).expect("Method not found");
let mut temp = ::std::ptr::null_mut();
unsafe {
::quest_hook::inline_hook::A64HookFunction(
::std::mem::transmute::<unsafe extern "C" fn(), *mut ::std::ffi::c_void>(method.raw().methodPointer.unwrap()),
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ::std::ffi::c_void>( #hook_name ),
&mut temp,
);
self.original.store(
::std::mem::transmute::<*mut ::std::ffi::c_void, *mut ()>(temp),
::std::sync::atomic::Ordering::Relaxed
);
}
}
fn original(&self, #hook_args ) #return_type {
let ptr = self.original.load(::std::sync::atomic::Ordering::Relaxed);
let original = unsafe {
::std::mem::transmute::<*const (), Option<extern "C" fn( #hook_args ) #return_type >>(ptr)
};
(original.expect("Hook is not installed"))( #hook_args_untyped )
}
fn hook(&self, #hook_args ) #return_type {
#hook_name( #hook_args_untyped )
}
}
impl ::quest_hook::Hook for #hook_struct_name {
fn install(&self) {
self.install()
}
fn namespace(&self) -> &'static str {
self.namespace
}
fn class_name(&self) -> &'static str {
self.class_name
}
fn method_name(&self) -> &'static str {
self.method_name
}
fn parameters_count(&self) -> usize {
self.parameters_count
}
fn hook(&self) -> *mut () {
unsafe {
::std::mem::transmute::<extern "C" fn( #hook_args ) #return_type, *mut ()>( #hook_name )
}
}
fn original(&self) -> *mut () {
self.original.load(::std::sync::atomic::Ordering::Relaxed)
}
}
#[allow(non_upper_case_globals)]
static #name: #hook_struct_name = #hook_struct_name {
original: ::std::sync::atomic::AtomicPtr::new(::std::ptr::null_mut()),
namespace: #namespace,
class_name: #class,
method_name: #method,
parameters_count: #num_hook_args as usize
};
};
Ok(tokens.into())
}
#[doc(hidden)]
#[proc_macro]
pub fn impl_arguments_parameters(input: TokenStream) -> TokenStream {
let range = parse_macro_input!(input as ExprRange);
match create_impl_arguments_parameters(range) {
Ok(ts) => ts,
Err(err) => err.to_compile_error().into(),
}
}
fn create_impl_arguments_parameters(range: ExprRange) -> Result<TokenStream, Error> {
let span = range.span();
let start = range
.from
.ok_or_else(|| Error::new(span, "Tuple length range must have a lower bound"))?;
let start = parse_range_bound(*start)?;
let end = range
.to
.ok_or_else(|| Error::new(span, "Tuple length range must have an upper bound"))?;
let end = parse_range_bound(*end)?;
let range = match range.limits {
RangeLimits::HalfOpen(_) if end <= start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::HalfOpen(_) => start..end,
RangeLimits::Closed(_) if end < start => {
return Err(Error::new(span, "Tuple length range must be valid"))
}
RangeLimits::Closed(_) => start..(end + 1),
};
let mut ts = TokenStream::new();
for n in range {
let generic_params_argument = (1..=n).map(|n| format_ident!("A{}", n));
let matches_argument = generic_params_argument
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(args[#n].ty())));
let invokables = (0..n).map(Index::from).map(|n| quote!(self.#n.invokable()));
let generic_params_parameter = (1..=n).map(|n| format_ident!("P{}", n));
let matches_parameter = generic_params_parameter
.clone()
.enumerate()
.map(|(n, gp)| quote!(<#gp>::matches(params[#n].ty())));
let generic_params_argument_tuple = generic_params_argument.clone();
let generic_params_argument_where = generic_params_argument.clone();
let generic_params_argument_type = generic_params_argument.clone();
let generic_params_parameter_tuple = generic_params_parameter.clone();
let generic_params_parameter_where = generic_params_parameter.clone();
let generic_params_parameter_type = generic_params_parameter.clone();
let impl_ts = quote! {
unsafe impl<#(#generic_params_argument),*> Arguments<#n> for (#(#generic_params_argument_tuple,)*)
where
#(#generic_params_argument_where: Argument),*
{
type Type = (#(#generic_params_argument_type::Type,)*);
fn matches(args: &[&ParameterInfo]) -> bool {
args.len() == #n #( && #matches_argument)*
}
fn invokable(&self) -> [*mut c_void; #n] {
[#(#invokables),*]
}
}
unsafe impl<#(#generic_params_parameter),*> Parameters<#n> for (#(#generic_params_parameter_tuple,)*)
where
#(#generic_params_parameter_where: Parameter),*
{
type Type = (#(#generic_params_parameter_type::Type,)*);
fn matches(params: &[&ParameterInfo]) -> bool {
params.len() == #n #( && #matches_parameter)*
}
}
};
ts.extend(TokenStream::from(impl_ts));
}
Ok(ts)
}
fn parse_range_bound(bound: Expr) -> Result<usize, Error> {
let bound: NonZeroUsize = match bound {
syn::Expr::Lit(ExprLit {
lit: Lit::Int(n), ..
}) => n.base10_parse()?,
_ => {
return Err(Error::new(
bound.span(),
"Tuple length bound must be an integer",
))
}
};
Ok(bound.get())
} | random_line_split |
|
imaginate.rs | use crate::wasm_application_io::WasmEditorApi;
use core::any::TypeId;
use core::future::Future;
use futures::{future::Either, TryFutureExt};
use glam::{DVec2, U64Vec2};
use graph_craft::imaginate_input::{ImaginateController, ImaginateMaskStartingFill, ImaginatePreferences, ImaginateSamplingMethod, ImaginateServerStatus, ImaginateStatus, ImaginateTerminationHandle};
use graphene_core::application_io::NodeGraphUpdateMessage;
use graphene_core::raster::{Color, Image, Luma, Pixel};
use image::{DynamicImage, ImageBuffer, ImageOutputFormat};
use reqwest::Url;
const PROGRESS_EVERY_N_STEPS: u32 = 5;
const SDAPI_TEXT_TO_IMAGE: &str = "sdapi/v1/txt2img";
const SDAPI_IMAGE_TO_IMAGE: &str = "sdapi/v1/img2img";
const SDAPI_PROGRESS: &str = "sdapi/v1/progress?skip_current_image=true";
const SDAPI_TERMINATE: &str = "sdapi/v1/interrupt";
fn new_client() -> Result<reqwest::Client, Error> {
reqwest::ClientBuilder::new().build().map_err(Error::ClientBuild)
}
fn parse_url(url: &str) -> Result<Url, Error> {
url.try_into().map_err(|err| Error::UrlParse { text: url.into(), err })
}
fn join_url(base_url: &Url, path: &str) -> Result<Url, Error> {
base_url.join(path).map_err(|err| Error::UrlParse { text: base_url.to_string(), err })
}
fn new_get_request<U: reqwest::IntoUrl>(client: &reqwest::Client, url: U) -> Result<reqwest::Request, Error> {
client.get(url).header("Accept", "*/*").build().map_err(Error::RequestBuild)
}
pub struct ImaginatePersistentData {
pending_server_check: Option<futures::channel::oneshot::Receiver<reqwest::Result<reqwest::Response>>>,
host_name: Url,
client: Option<reqwest::Client>,
server_status: ImaginateServerStatus,
}
impl core::fmt::Debug for ImaginatePersistentData {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct(core::any::type_name::<Self>())
.field("pending_server_check", &self.pending_server_check.is_some())
.field("host_name", &self.host_name)
.field("status", &self.server_status)
.finish()
}
}
impl Default for ImaginatePersistentData {
fn default() -> Self {
let mut status = ImaginateServerStatus::default();
let client = new_client().map_err(|err| status = ImaginateServerStatus::Failed(err.to_string())).ok();
let ImaginatePreferences { host_name } = Default::default();
Self {
pending_server_check: None,
host_name: parse_url(&host_name).unwrap(),
client,
server_status: status,
}
}
}
type ImaginateFuture = core::pin::Pin<Box<dyn Future<Output = ()> + 'static>>;
impl ImaginatePersistentData {
pub fn set_host_name(&mut self, name: &str) |
fn initiate_server_check_maybe_fail(&mut self) -> Result<Option<ImaginateFuture>, Error> {
use futures::future::FutureExt;
let Some(client) = &self.client else {
return Ok(None);
};
if self.pending_server_check.is_some() {
return Ok(None);
}
self.server_status = ImaginateServerStatus::Checking;
let url = join_url(&self.host_name, SDAPI_PROGRESS)?;
let request = new_get_request(client, url)?;
let (send, recv) = futures::channel::oneshot::channel();
let response_future = client.execute(request).map(move |r| {
let _ = send.send(r);
});
self.pending_server_check = Some(recv);
Ok(Some(Box::pin(response_future)))
}
pub fn initiate_server_check(&mut self) -> Option<ImaginateFuture> {
match self.initiate_server_check_maybe_fail() {
Ok(f) => f,
Err(err) => {
self.server_status = ImaginateServerStatus::Failed(err.to_string());
None
}
}
}
pub fn poll_server_check(&mut self) {
if let Some(mut check) = self.pending_server_check.take() {
self.server_status = match check.try_recv().map(|r| r.map(|r| r.and_then(reqwest::Response::error_for_status))) {
Ok(Some(Ok(_response))) => ImaginateServerStatus::Connected,
Ok(Some(Err(_))) | Err(_) => ImaginateServerStatus::Unavailable,
Ok(None) => {
self.pending_server_check = Some(check);
ImaginateServerStatus::Checking
}
}
}
}
pub fn server_status(&self) -> &ImaginateServerStatus {
&self.server_status
}
pub fn is_checking(&self) -> bool {
matches!(self.server_status, ImaginateServerStatus::Checking)
}
}
#[derive(Debug)]
struct ImaginateFutureAbortHandle(futures::future::AbortHandle);
impl ImaginateTerminationHandle for ImaginateFutureAbortHandle {
fn terminate(&self) {
self.0.abort()
}
}
#[derive(Debug)]
enum Error {
UrlParse { text: String, err: <&'static str as TryInto<Url>>::Error },
ClientBuild(reqwest::Error),
RequestBuild(reqwest::Error),
Request(reqwest::Error),
ResponseFormat(reqwest::Error),
NoImage,
Base64Decode(base64::DecodeError),
ImageDecode(image::error::ImageError),
ImageEncode(image::error::ImageError),
UnsupportedPixelType(&'static str),
InconsistentImageSize,
Terminated,
TerminationFailed(reqwest::Error),
}
impl core::fmt::Display for Error {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
match self {
Self::UrlParse { text, err } => write!(f, "invalid url '{text}' ({err})"),
Self::ClientBuild(err) => write!(f, "failed to create a reqwest client ({err})"),
Self::RequestBuild(err) => write!(f, "failed to create a reqwest request ({err})"),
Self::Request(err) => write!(f, "request failed ({err})"),
Self::ResponseFormat(err) => write!(f, "got an invalid API response ({err})"),
Self::NoImage => write!(f, "got an empty API response"),
Self::Base64Decode(err) => write!(f, "failed to decode base64 encoded image ({err})"),
Self::ImageDecode(err) => write!(f, "failed to decode png image ({err})"),
Self::ImageEncode(err) => write!(f, "failed to encode png image ({err})"),
Self::UnsupportedPixelType(ty) => write!(f, "pixel type `{ty}` not supported for imaginate images"),
Self::InconsistentImageSize => write!(f, "image width and height do not match the image byte size"),
Self::Terminated => write!(f, "imaginate request was terminated by the user"),
Self::TerminationFailed(err) => write!(f, "termination failed ({err})"),
}
}
}
impl std::error::Error for Error {}
#[derive(Default, Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
struct ImageResponse {
images: Vec<String>,
}
#[derive(Default, Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
struct ProgressResponse {
progress: f64,
}
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateTextToImageRequestOverrideSettings {
show_progress_every_n_steps: u32,
}
impl Default for ImaginateTextToImageRequestOverrideSettings {
fn default() -> Self {
Self {
show_progress_every_n_steps: PROGRESS_EVERY_N_STEPS,
}
}
}
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateImageToImageRequestOverrideSettings {
show_progress_every_n_steps: u32,
img2img_fix_steps: bool,
}
impl Default for ImaginateImageToImageRequestOverrideSettings {
fn default() -> Self {
Self {
show_progress_every_n_steps: PROGRESS_EVERY_N_STEPS,
img2img_fix_steps: true,
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateTextToImageRequest<'a> {
#[serde(flatten)]
common: ImaginateCommonImageRequest<'a>,
override_settings: ImaginateTextToImageRequestOverrideSettings,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateMask {
mask: String,
mask_blur: String,
inpainting_fill: u32,
inpaint_full_res: bool,
inpainting_mask_invert: u32,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateImageToImageRequest<'a> {
#[serde(flatten)]
common: ImaginateCommonImageRequest<'a>,
override_settings: ImaginateImageToImageRequestOverrideSettings,
init_images: Vec<String>,
denoising_strength: f64,
#[serde(flatten)]
mask: Option<ImaginateMask>,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateCommonImageRequest<'a> {
prompt: String,
seed: f64,
steps: u32,
cfg_scale: f64,
width: f64,
height: f64,
restore_faces: bool,
tiling: bool,
negative_prompt: String,
sampler_index: &'a str,
}
#[cfg(feature = "imaginate")]
#[allow(clippy::too_many_arguments)]
pub async fn imaginate<'a, P: Pixel>(
image: Image<P>,
editor_api: impl Future<Output = WasmEditorApi<'a>>,
controller: ImaginateController,
seed: impl Future<Output = f64>,
res: impl Future<Output = Option<DVec2>>,
samples: impl Future<Output = u32>,
sampling_method: impl Future<Output = ImaginateSamplingMethod>,
prompt_guidance: impl Future<Output = f32>,
prompt: impl Future<Output = String>,
negative_prompt: impl Future<Output = String>,
adapt_input_image: impl Future<Output = bool>,
image_creativity: impl Future<Output = f32>,
masking_layer: impl Future<Output = Option<Vec<u64>>>,
inpaint: impl Future<Output = bool>,
mask_blur: impl Future<Output = f32>,
mask_starting_fill: impl Future<Output = ImaginateMaskStartingFill>,
improve_faces: impl Future<Output = bool>,
tiling: impl Future<Output = bool>,
) -> Image<P> {
let WasmEditorApi {
node_graph_message_sender,
imaginate_preferences,
..
} = editor_api.await;
let set_progress = |progress: ImaginateStatus| {
controller.set_status(progress);
node_graph_message_sender.send(NodeGraphUpdateMessage::ImaginateStatusUpdate);
};
let host_name = imaginate_preferences.get_host_name();
imaginate_maybe_fail(
image,
host_name,
set_progress,
&controller,
seed,
res,
samples,
sampling_method,
prompt_guidance,
prompt,
negative_prompt,
adapt_input_image,
image_creativity,
masking_layer,
inpaint,
mask_blur,
mask_starting_fill,
improve_faces,
tiling,
)
.await
.unwrap_or_else(|err| {
match err {
Error::Terminated => {
set_progress(ImaginateStatus::Terminated);
}
err => {
error!("{err}");
set_progress(ImaginateStatus::Failed(err.to_string()));
}
};
Image::empty()
})
}
#[cfg(feature = "imaginate")]
#[allow(clippy::too_many_arguments)]
async fn imaginate_maybe_fail<'a, P: Pixel, F: Fn(ImaginateStatus)>(
image: Image<P>,
host_name: &str,
set_progress: F,
controller: &ImaginateController,
seed: impl Future<Output = f64>,
res: impl Future<Output = Option<DVec2>>,
samples: impl Future<Output = u32>,
sampling_method: impl Future<Output = ImaginateSamplingMethod>,
prompt_guidance: impl Future<Output = f32>,
prompt: impl Future<Output = String>,
negative_prompt: impl Future<Output = String>,
adapt_input_image: impl Future<Output = bool>,
image_creativity: impl Future<Output = f32>,
_masking_layer: impl Future<Output = Option<Vec<u64>>>,
_inpaint: impl Future<Output = bool>,
_mask_blur: impl Future<Output = f32>,
_mask_starting_fill: impl Future<Output = ImaginateMaskStartingFill>,
improve_faces: impl Future<Output = bool>,
tiling: impl Future<Output = bool>,
) -> Result<Image<P>, Error> {
set_progress(ImaginateStatus::Beginning);
let base_url: Url = parse_url(host_name)?;
let client = new_client()?;
let sampler_index = sampling_method.await;
let sampler_index = sampler_index.api_value();
let res = res.await.unwrap_or_else(|| {
let (width, height) = pick_safe_imaginate_resolution((image.width as _, image.height as _));
DVec2::new(width as _, height as _)
});
let common_request_data = ImaginateCommonImageRequest {
prompt: prompt.await,
seed: seed.await,
steps: samples.await,
cfg_scale: prompt_guidance.await as f64,
width: res.x,
height: res.y,
restore_faces: improve_faces.await,
tiling: tiling.await,
negative_prompt: negative_prompt.await,
sampler_index,
};
let request_builder = if adapt_input_image.await {
let base64_data = image_to_base64(image)?;
let request_data = ImaginateImageToImageRequest {
common: common_request_data,
override_settings: Default::default(),
init_images: vec![base64_data],
denoising_strength: image_creativity.await as f64 * 0.01,
mask: None,
};
let url = join_url(&base_url, SDAPI_IMAGE_TO_IMAGE)?;
client.post(url).json(&request_data)
} else {
let request_data = ImaginateTextToImageRequest {
common: common_request_data,
override_settings: Default::default(),
};
let url = join_url(&base_url, SDAPI_TEXT_TO_IMAGE)?;
client.post(url).json(&request_data)
};
let request = request_builder.header("Accept", "*/*").build().map_err(Error::RequestBuild)?;
let (response_future, abort_handle) = futures::future::abortable(client.execute(request));
controller.set_termination_handle(Box::new(ImaginateFutureAbortHandle(abort_handle)));
let progress_url = join_url(&base_url, SDAPI_PROGRESS)?;
futures::pin_mut!(response_future);
let response = loop {
let progress_request = new_get_request(&client, progress_url.clone())?;
let progress_response_future = client.execute(progress_request).and_then(|response| response.json());
futures::pin_mut!(progress_response_future);
response_future = match futures::future::select(response_future, progress_response_future).await {
Either::Left((response, _)) => break response,
Either::Right((progress, response_future)) => {
if let Ok(ProgressResponse { progress }) = progress {
set_progress(ImaginateStatus::Generating(progress * 100.));
}
response_future
}
};
};
let response = match response {
Ok(response) => response.and_then(reqwest::Response::error_for_status).map_err(Error::Request)?,
Err(_aborted) => {
set_progress(ImaginateStatus::Terminating);
let url = join_url(&base_url, SDAPI_TERMINATE)?;
let request = client.post(url).build().map_err(Error::RequestBuild)?;
// The user probably doesn't really care whether the server side was actually aborted or whether there was a network error.
// So we report the request as terminated even if the termination request itself failed.
let _ = client.execute(request).await.and_then(reqwest::Response::error_for_status).map_err(Error::TerminationFailed)?;
return Err(Error::Terminated);
}
};
set_progress(ImaginateStatus::Uploading);
let ImageResponse { images } = response.json().await.map_err(Error::ResponseFormat)?;
let result = images.into_iter().next().ok_or(Error::NoImage).and_then(base64_to_image)?;
set_progress(ImaginateStatus::ReadyDone);
Ok(result)
}
fn image_to_base64<P: Pixel>(image: Image<P>) -> Result<String, Error> {
use base64::prelude::*;
let Image { width, height, data } = image;
fn cast_with_f32<S: Pixel, D: image::Pixel<Subpixel = f32>>(data: Vec<S>, width: u32, height: u32) -> Result<DynamicImage, Error>
where
DynamicImage: From<ImageBuffer<D, Vec<f32>>>,
{
ImageBuffer::<D, Vec<f32>>::from_raw(width, height, bytemuck::cast_vec(data))
.ok_or(Error::InconsistentImageSize)
.map(Into::into)
}
let image: DynamicImage = match TypeId::of::<P>() {
id if id == TypeId::of::<Color>() => cast_with_f32::<_, image::Rgba<f32>>(data, width, height)?
// we need to do this cast because PNG does not support rgba32f
.to_rgba16().into(),
id if id == TypeId::of::<Luma>() => cast_with_f32::<_, image::Luma<f32>>(data, width, height)?
// we need to do this cast because PNG does not support luma32f
.to_luma16().into(),
_ => return Err(Error::UnsupportedPixelType(core::any::type_name::<P>())),
};
let mut png_data = std::io::Cursor::new(vec![]);
image.write_to(&mut png_data, ImageOutputFormat::Png).map_err(Error::ImageEncode)?;
Ok(BASE64_STANDARD.encode(png_data.into_inner()))
}
fn base64_to_image<D: AsRef<[u8]>, P: Pixel>(base64_data: D) -> Result<Image<P>, Error> {
use base64::prelude::*;
let png_data = BASE64_STANDARD.decode(base64_data).map_err(Error::Base64Decode)?;
let dyn_image = image::load_from_memory_with_format(&png_data, image::ImageFormat::Png).map_err(Error::ImageDecode)?;
let (width, height) = (dyn_image.width(), dyn_image.height());
let result_data: Vec<P> = match TypeId::of::<P>() {
id if id == TypeId::of::<Color>() => bytemuck::cast_vec(dyn_image.into_rgba32f().into_raw()),
id if id == TypeId::of::<Luma>() => bytemuck::cast_vec(dyn_image.to_luma32f().into_raw()),
_ => return Err(Error::UnsupportedPixelType(core::any::type_name::<P>())),
};
Ok(Image { data: result_data, width, height })
}
pub fn pick_safe_imaginate_resolution((width, height): (f64, f64)) -> (u64, u64) {
const MAX_RESOLUTION: u64 = 1000 * 1000;
// this is the maximum width/height that can be obtained
const MAX_DIMENSION: u64 = (MAX_RESOLUTION / 64) & !63;
// round each dimension to the nearest multiple of 64
let size = (DVec2::new(width, height).round().clamp(DVec2::ZERO, DVec2::splat(MAX_DIMENSION as _)).as_u64vec2() + U64Vec2::splat(32)).max(U64Vec2::splat(64)) & !U64Vec2::splat(63);
let resolution = size.x * size.y;
if resolution > MAX_RESOLUTION {
// scale down the image, so it is smaller than MAX_RESOLUTION
let scale = (MAX_RESOLUTION as f64 / resolution as f64).sqrt();
let size = size.as_dvec2() * scale;
if size.x < 64.0 {
// the image is extremely wide
(64, MAX_DIMENSION)
} else if size.y < 64.0 {
// the image is extremely high
(MAX_DIMENSION, 64)
} else {
// round down to a multiple of 64, so that the resolution is still smaller than MAX_RESOLUTION
(size.as_u64vec2() & !U64Vec2::splat(63)).into()
}
} else {
size.into()
}
}
| {
match parse_url(name) {
Ok(url) => self.host_name = url,
Err(err) => self.server_status = ImaginateServerStatus::Failed(err.to_string()),
}
} | identifier_body |
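// Editorial sketch, not part of the original file: worked examples for
// `pick_safe_imaginate_resolution` as defined above. Each dimension is rounded
// to the nearest multiple of 64 and the total is kept at or below one megapixel.
//
// assert_eq!(pick_safe_imaginate_resolution((510.0, 510.0)), (512, 512));
// assert_eq!(pick_safe_imaginate_resolution((0.0, 0.0)), (64, 64));         // clamped up to the 64 minimum
// assert_eq!(pick_safe_imaginate_resolution((4000.0, 4000.0)), (960, 960)); // scaled down below 1_000_000 pixels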
imaginate.rs | use crate::wasm_application_io::WasmEditorApi;
use core::any::TypeId;
use core::future::Future;
use futures::{future::Either, TryFutureExt};
use glam::{DVec2, U64Vec2};
use graph_craft::imaginate_input::{ImaginateController, ImaginateMaskStartingFill, ImaginatePreferences, ImaginateSamplingMethod, ImaginateServerStatus, ImaginateStatus, ImaginateTerminationHandle};
use graphene_core::application_io::NodeGraphUpdateMessage;
use graphene_core::raster::{Color, Image, Luma, Pixel};
use image::{DynamicImage, ImageBuffer, ImageOutputFormat};
use reqwest::Url;
const PROGRESS_EVERY_N_STEPS: u32 = 5;
const SDAPI_TEXT_TO_IMAGE: &str = "sdapi/v1/txt2img";
const SDAPI_IMAGE_TO_IMAGE: &str = "sdapi/v1/img2img";
const SDAPI_PROGRESS: &str = "sdapi/v1/progress?skip_current_image=true";
const SDAPI_TERMINATE: &str = "sdapi/v1/interrupt";
fn new_client() -> Result<reqwest::Client, Error> {
reqwest::ClientBuilder::new().build().map_err(Error::ClientBuild)
}
fn parse_url(url: &str) -> Result<Url, Error> {
url.try_into().map_err(|err| Error::UrlParse { text: url.into(), err })
}
fn join_url(base_url: &Url, path: &str) -> Result<Url, Error> {
base_url.join(path).map_err(|err| Error::UrlParse { text: base_url.to_string(), err })
}
fn new_get_request<U: reqwest::IntoUrl>(client: &reqwest::Client, url: U) -> Result<reqwest::Request, Error> {
client.get(url).header("Accept", "*/*").build().map_err(Error::RequestBuild)
}
pub struct ImaginatePersistentData {
pending_server_check: Option<futures::channel::oneshot::Receiver<reqwest::Result<reqwest::Response>>>,
host_name: Url,
client: Option<reqwest::Client>,
server_status: ImaginateServerStatus,
}
impl core::fmt::Debug for ImaginatePersistentData {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct(core::any::type_name::<Self>())
.field("pending_server_check", &self.pending_server_check.is_some())
.field("host_name", &self.host_name)
.field("status", &self.server_status)
.finish()
}
}
impl Default for ImaginatePersistentData {
fn default() -> Self {
let mut status = ImaginateServerStatus::default();
let client = new_client().map_err(|err| status = ImaginateServerStatus::Failed(err.to_string())).ok();
let ImaginatePreferences { host_name } = Default::default();
Self {
pending_server_check: None,
host_name: parse_url(&host_name).unwrap(),
client,
server_status: status,
}
}
}
type ImaginateFuture = core::pin::Pin<Box<dyn Future<Output = ()> + 'static>>;
impl ImaginatePersistentData {
pub fn set_host_name(&mut self, name: &str) {
match parse_url(name) {
Ok(url) => self.host_name = url,
Err(err) => self.server_status = ImaginateServerStatus::Failed(err.to_string()),
}
}
fn initiate_server_check_maybe_fail(&mut self) -> Result<Option<ImaginateFuture>, Error> {
use futures::future::FutureExt;
let Some(client) = &self.client else {
return Ok(None);
};
if self.pending_server_check.is_some() {
return Ok(None);
}
self.server_status = ImaginateServerStatus::Checking;
let url = join_url(&self.host_name, SDAPI_PROGRESS)?;
let request = new_get_request(client, url)?;
let (send, recv) = futures::channel::oneshot::channel();
let response_future = client.execute(request).map(move |r| {
let _ = send.send(r);
});
self.pending_server_check = Some(recv);
Ok(Some(Box::pin(response_future)))
}
pub fn initiate_server_check(&mut self) -> Option<ImaginateFuture> {
match self.initiate_server_check_maybe_fail() {
Ok(f) => f,
Err(err) => {
self.server_status = ImaginateServerStatus::Failed(err.to_string());
None
}
}
}
pub fn poll_server_check(&mut self) {
if let Some(mut check) = self.pending_server_check.take() {
self.server_status = match check.try_recv().map(|r| r.map(|r| r.and_then(reqwest::Response::error_for_status))) {
Ok(Some(Ok(_response))) => ImaginateServerStatus::Connected,
Ok(Some(Err(_))) | Err(_) => ImaginateServerStatus::Unavailable,
Ok(None) => {
self.pending_server_check = Some(check);
ImaginateServerStatus::Checking
}
}
}
}
pub fn server_status(&self) -> &ImaginateServerStatus {
&self.server_status
}
pub fn is_checking(&self) -> bool {
matches!(self.server_status, ImaginateServerStatus::Checking)
}
}
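// Editorial sketch, not part of the original file: the server check is split
// into an initiating half and a polling half so the HTTP future can run on an
// executor while the owner keeps `&mut` access to this struct:
//
// let mut data = ImaginatePersistentData::default();
// if let Some(fut) = data.initiate_server_check() {
//     /* spawn `fut` on the local executor */
// }
// // ...later, e.g. once per frame:
// data.poll_server_check(); // Checking -> Connected or Unavailable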
#[derive(Debug)]
struct ImaginateFutureAbortHandle(futures::future::AbortHandle);
impl ImaginateTerminationHandle for ImaginateFutureAbortHandle {
fn terminate(&self) {
self.0.abort()
}
}
#[derive(Debug)]
enum Error {
UrlParse { text: String, err: <&'static str as TryInto<Url>>::Error },
ClientBuild(reqwest::Error),
RequestBuild(reqwest::Error),
Request(reqwest::Error),
ResponseFormat(reqwest::Error),
NoImage,
Base64Decode(base64::DecodeError),
ImageDecode(image::error::ImageError),
ImageEncode(image::error::ImageError),
UnsupportedPixelType(&'static str),
InconsistentImageSize,
Terminated,
TerminationFailed(reqwest::Error),
}
impl core::fmt::Display for Error {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
match self {
Self::UrlParse { text, err } => write!(f, "invalid url '{text}' ({err})"),
Self::ClientBuild(err) => write!(f, "failed to create a reqwest client ({err})"),
Self::RequestBuild(err) => write!(f, "failed to create a reqwest request ({err})"),
Self::Request(err) => write!(f, "request failed ({err})"),
Self::ResponseFormat(err) => write!(f, "got an invalid API response ({err})"),
Self::NoImage => write!(f, "got an empty API response"),
Self::Base64Decode(err) => write!(f, "failed to decode base64 encoded image ({err})"),
Self::ImageDecode(err) => write!(f, "failed to decode png image ({err})"),
Self::ImageEncode(err) => write!(f, "failed to encode png image ({err})"),
Self::UnsupportedPixelType(ty) => write!(f, "pixel type `{ty}` not supported for imaginate images"),
Self::InconsistentImageSize => write!(f, "image width and height do not match the image byte size"),
Self::Terminated => write!(f, "imaginate request was terminated by the user"),
Self::TerminationFailed(err) => write!(f, "termination failed ({err})"),
}
}
}
impl std::error::Error for Error {}
#[derive(Default, Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
struct ImageResponse {
images: Vec<String>,
}
#[derive(Default, Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
struct ProgressResponse {
progress: f64,
}
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateTextToImageRequestOverrideSettings {
show_progress_every_n_steps: u32,
}
impl Default for ImaginateTextToImageRequestOverrideSettings {
fn default() -> Self {
Self {
show_progress_every_n_steps: PROGRESS_EVERY_N_STEPS,
}
}
}
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateImageToImageRequestOverrideSettings {
show_progress_every_n_steps: u32,
img2img_fix_steps: bool,
}
impl Default for ImaginateImageToImageRequestOverrideSettings {
fn default() -> Self {
Self {
show_progress_every_n_steps: PROGRESS_EVERY_N_STEPS,
img2img_fix_steps: true,
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateTextToImageRequest<'a> {
#[serde(flatten)]
common: ImaginateCommonImageRequest<'a>,
override_settings: ImaginateTextToImageRequestOverrideSettings,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateMask {
mask: String,
mask_blur: String,
inpainting_fill: u32,
inpaint_full_res: bool,
inpainting_mask_invert: u32,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateImageToImageRequest<'a> {
#[serde(flatten)]
common: ImaginateCommonImageRequest<'a>,
override_settings: ImaginateImageToImageRequestOverrideSettings,
init_images: Vec<String>,
denoising_strength: f64,
#[serde(flatten)]
mask: Option<ImaginateMask>,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
struct ImaginateCommonImageRequest<'a> {
prompt: String,
seed: f64,
steps: u32,
cfg_scale: f64,
width: f64,
height: f64,
restore_faces: bool,
tiling: bool,
negative_prompt: String,
sampler_index: &'a str,
}
#[cfg(feature = "imaginate")]
#[allow(clippy::too_many_arguments)]
pub async fn imaginate<'a, P: Pixel>(
image: Image<P>,
editor_api: impl Future<Output = WasmEditorApi<'a>>,
controller: ImaginateController,
seed: impl Future<Output = f64>,
res: impl Future<Output = Option<DVec2>>,
samples: impl Future<Output = u32>,
sampling_method: impl Future<Output = ImaginateSamplingMethod>,
prompt_guidance: impl Future<Output = f32>,
prompt: impl Future<Output = String>,
negative_prompt: impl Future<Output = String>,
adapt_input_image: impl Future<Output = bool>,
image_creativity: impl Future<Output = f32>,
masking_layer: impl Future<Output = Option<Vec<u64>>>,
inpaint: impl Future<Output = bool>,
mask_blur: impl Future<Output = f32>,
mask_starting_fill: impl Future<Output = ImaginateMaskStartingFill>,
improve_faces: impl Future<Output = bool>,
tiling: impl Future<Output = bool>,
) -> Image<P> {
let WasmEditorApi {
node_graph_message_sender,
imaginate_preferences,
..
} = editor_api.await;
let set_progress = |progress: ImaginateStatus| {
controller.set_status(progress);
node_graph_message_sender.send(NodeGraphUpdateMessage::ImaginateStatusUpdate);
};
let host_name = imaginate_preferences.get_host_name();
imaginate_maybe_fail(
image,
host_name,
set_progress,
&controller,
seed,
res,
samples,
sampling_method,
prompt_guidance,
prompt,
negative_prompt,
adapt_input_image,
image_creativity,
masking_layer,
inpaint,
mask_blur,
mask_starting_fill,
improve_faces,
tiling,
)
.await
.unwrap_or_else(|err| {
match err {
Error::Terminated => {
set_progress(ImaginateStatus::Terminated);
}
err => {
error!("{err}");
set_progress(ImaginateStatus::Failed(err.to_string()));
}
};
Image::empty()
})
}
#[cfg(feature = "imaginate")]
#[allow(clippy::too_many_arguments)]
async fn imaginate_maybe_fail<'a, P: Pixel, F: Fn(ImaginateStatus)>(
image: Image<P>,
host_name: &str,
set_progress: F,
controller: &ImaginateController,
seed: impl Future<Output = f64>,
res: impl Future<Output = Option<DVec2>>,
samples: impl Future<Output = u32>,
sampling_method: impl Future<Output = ImaginateSamplingMethod>,
prompt_guidance: impl Future<Output = f32>,
prompt: impl Future<Output = String>,
negative_prompt: impl Future<Output = String>,
adapt_input_image: impl Future<Output = bool>,
image_creativity: impl Future<Output = f32>,
_masking_layer: impl Future<Output = Option<Vec<u64>>>,
_inpaint: impl Future<Output = bool>,
_mask_blur: impl Future<Output = f32>,
_mask_starting_fill: impl Future<Output = ImaginateMaskStartingFill>,
improve_faces: impl Future<Output = bool>,
tiling: impl Future<Output = bool>,
) -> Result<Image<P>, Error> {
set_progress(ImaginateStatus::Beginning);
let base_url: Url = parse_url(host_name)?;
let client = new_client()?;
let sampler_index = sampling_method.await;
let sampler_index = sampler_index.api_value();
let res = res.await.unwrap_or_else(|| {
let (width, height) = pick_safe_imaginate_resolution((image.width as _, image.height as _));
DVec2::new(width as _, height as _)
});
let common_request_data = ImaginateCommonImageRequest {
prompt: prompt.await,
seed: seed.await,
steps: samples.await,
cfg_scale: prompt_guidance.await as f64,
width: res.x,
height: res.y,
restore_faces: improve_faces.await,
tiling: tiling.await,
negative_prompt: negative_prompt.await,
sampler_index,
};
let request_builder = if adapt_input_image.await {
let base64_data = image_to_base64(image)?;
let request_data = ImaginateImageToImageRequest {
common: common_request_data,
override_settings: Default::default(),
init_images: vec![base64_data],
denoising_strength: image_creativity.await as f64 * 0.01,
mask: None,
};
let url = join_url(&base_url, SDAPI_IMAGE_TO_IMAGE)?;
client.post(url).json(&request_data)
} else {
let request_data = ImaginateTextToImageRequest {
common: common_request_data,
override_settings: Default::default(),
};
let url = join_url(&base_url, SDAPI_TEXT_TO_IMAGE)?;
client.post(url).json(&request_data)
};
let request = request_builder.header("Accept", "*/*").build().map_err(Error::RequestBuild)?;
let (response_future, abort_handle) = futures::future::abortable(client.execute(request));
controller.set_termination_handle(Box::new(ImaginateFutureAbortHandle(abort_handle)));
let progress_url = join_url(&base_url, SDAPI_PROGRESS)?;
futures::pin_mut!(response_future);
let response = loop {
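// Race the pending generation request against a fresh progress poll on each iteration;
// only the generation response breaks the loop, progress responses merely update the status.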
let progress_request = new_get_request(&client, progress_url.clone())?;
let progress_response_future = client.execute(progress_request).and_then(|response| response.json());
futures::pin_mut!(progress_response_future);
response_future = match futures::future::select(response_future, progress_response_future).await {
Either::Left((response, _)) => break response,
Either::Right((progress, response_future)) => {
if let Ok(ProgressResponse { progress }) = progress {
set_progress(ImaginateStatus::Generating(progress * 100.));
}
response_future
}
};
};
let response = match response {
Ok(response) => response.and_then(reqwest::Response::error_for_status).map_err(Error::Request)?,
Err(_aborted) => {
set_progress(ImaginateStatus::Terminating);
let url = join_url(&base_url, SDAPI_TERMINATE)?;
let request = client.post(url).build().map_err(Error::RequestBuild)?;
// The user probably doesn't care whether the server side was actually aborted or whether there was a network error,
// so we report the request as terminated even if the termination request in reality failed.
let _ = client.execute(request).await.and_then(reqwest::Response::error_for_status).map_err(Error::TerminationFailed)?;
return Err(Error::Terminated);
}
};
set_progress(ImaginateStatus::Uploading);
let ImageResponse { images } = response.json().await.map_err(Error::ResponseFormat)?;
let result = images.into_iter().next().ok_or(Error::NoImage).and_then(base64_to_image)?;
set_progress(ImaginateStatus::ReadyDone);
Ok(result)
}
fn image_to_base64<P: Pixel>(image: Image<P>) -> Result<String, Error> {
use base64::prelude::*;
let Image { width, height, data } = image;
fn cast_with_f32<S: Pixel, D: image::Pixel<Subpixel = f32>>(data: Vec<S>, width: u32, height: u32) -> Result<DynamicImage, Error>
where
DynamicImage: From<ImageBuffer<D, Vec<f32>>>,
{
ImageBuffer::<D, Vec<f32>>::from_raw(width, height, bytemuck::cast_vec(data))
.ok_or(Error::InconsistentImageSize)
.map(Into::into)
}
let image: DynamicImage = match TypeId::of::<P>() {
id if id == TypeId::of::<Color>() => cast_with_f32::<_, image::Rgba<f32>>(data, width, height)?
// we need to do this cast, because png does not support rgba32f
.to_rgba16().into(),
id if id == TypeId::of::<Luma>() => cast_with_f32::<_, image::Luma<f32>>(data, width, height)?
// we need to do this cast, because png does not support luma32f
.to_luma16().into(),
_ => return Err(Error::UnsupportedPixelType(core::any::type_name::<P>())),
};
let mut png_data = std::io::Cursor::new(vec![]);
image.write_to(&mut png_data, ImageOutputFormat::Png).map_err(Error::ImageEncode)?;
Ok(BASE64_STANDARD.encode(png_data.into_inner()))
}
fn base64_to_image<D: AsRef<[u8]>, P: Pixel>(base64_data: D) -> Result<Image<P>, Error> {
use base64::prelude::*;
let png_data = BASE64_STANDARD.decode(base64_data).map_err(Error::Base64Decode)?;
let dyn_image = image::load_from_memory_with_format(&png_data, image::ImageFormat::Png).map_err(Error::ImageDecode)?;
let (width, height) = (dyn_image.width(), dyn_image.height());
let result_data: Vec<P> = match TypeId::of::<P>() {
id if id == TypeId::of::<Color>() => bytemuck::cast_vec(dyn_image.into_rgba32f().into_raw()),
id if id == TypeId::of::<Luma>() => bytemuck::cast_vec(dyn_image.to_luma32f().into_raw()),
_ => return Err(Error::UnsupportedPixelType(core::any::type_name::<P>())),
};
Ok(Image { data: result_data, width, height })
}
pub fn pick_safe_imaginate_resolution((width, height): (f64, f64)) -> (u64, u64) {
const MAX_RESOLUTION: u64 = 1000 * 1000;
// this is the maximum width/height that can be obtained
const MAX_DIMENSION: u64 = (MAX_RESOLUTION / 64) & !63;
// round each dimension to the nearest multiple of 64, with a minimum of 64
let size = (DVec2::new(width, height).round().clamp(DVec2::ZERO, DVec2::splat(MAX_DIMENSION as _)).as_u64vec2() + U64Vec2::splat(32)).max(U64Vec2::splat(64)) & !U64Vec2::splat(63);
let resolution = size.x * size.y;
if resolution > MAX_RESOLUTION {
// scale down the image, so it is smaller than MAX_RESOLUTION
let scale = (MAX_RESOLUTION as f64 / resolution as f64).sqrt();
let size = size.as_dvec2() * scale;
if size.x < 64.0 {
// the image is extremely tall, so clamp the width to the minimum
(64, MAX_DIMENSION)
} else if size.y < 64.0 {
// the image is extremely wide, so clamp the height to the minimum
(MAX_DIMENSION, 64)
} else {
// round down to a multiple of 64 so that the resolution stays below MAX_RESOLUTION
(size.as_u64vec2() & !U64Vec2::splat(63)).into()
}
} else {
size.into()
}
}
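// A couple of spot checks for the resolution picker (a hedged sketch; the expected
// values follow from the rounding rules above, assuming this function is in scope):
#[cfg(test)]
mod resolution_tests {
    use super::*;
    #[test]
    fn resolution_is_safe() {
        // tiny inputs are bumped up to the 64x64 minimum
        assert_eq!(pick_safe_imaginate_resolution((1.0, 1.0)), (64, 64));
        // oversized inputs are scaled down below the megapixel budget, staying multiples of 64
        let (w, h) = pick_safe_imaginate_resolution((4000.0, 4000.0));
        assert!(w % 64 == 0 && h % 64 == 0 && w * h <= 1_000_000);
    }
}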
// nn.rs
//! Neural networks
use crate::matrix::*;
/// a network
#[derive(Debug)]
pub struct Network {
// activation functions
activations: Vec<Box<dyn Activation>>,
// topology
topology: Vec<usize>,
// weights
weights: Vec<Matrix>
}
impl Network {
/// create a new random network with the given topology and activation functions
pub fn new_random(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_random_weights(&topology);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// create a new network with the given topology and activation functions and initial weights
pub fn new(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>, weights: &[f64]) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_weights(&topology, weights);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// forward evaluation
pub fn forward(&self, inputs: &[f64]) -> Vec<Vec<f64>> {
assert_eq!(self.topology[0],inputs.len());
let mut m = Matrix::new(1,inputs.len(),inputs);
let mut all_results = Vec::with_capacity(self.topology.len() - 1);
self.weights.iter().enumerate().for_each(| (ix,wm) | {
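// Append a constant 1.0 input so the extra row of each weight matrix acts as a bias term.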
add_column(&mut m,vec!(1.0));
m = mul(&m,wm);
//println!("after mul: {:?}",m);
let acts = self.activations[ix].activate(&get_data(&m));
m = Matrix::new(size(&m).0,size(&m).1,&acts);
//println!("after activation: {:?}",m);
all_results.push(acts);
});
assert_eq!(*self.topology.last().expect("empty topology!"),
all_results.last().expect("empty result!").len());
all_results
}
/// back propagation
pub fn backward(&mut self, inputs: &[f64], outputs: Vec<Vec<f64>>, target: &[f64], learning_rate: f64) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
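// Walk the layers from output back to input: `order` counts from the output layer,
// while `rev_order` is the layer index in forward order.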
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1]
} else {
inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
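// rows beyond the previous layer's outputs are the bias weights; their input is the constant 1.0 column appended in forward()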
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target; returns (iterations used, final error)
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets; returns the error summed over all tests after training
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end = st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
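// powi(x, -1) is the reciprocal, so this evaluates 1/(1 + e^-i)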
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
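// shift by the maximum before exponentiating so that exp() cannot overflow for large inputs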
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: half the sum of squared errors (the 1/2 cancels when differentiating)
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivative for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else |
}
}
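// Illustrative sketch (not in the original module): one more Activation built on the
// hyptan helper above, using the identity d/dx tanh(x) = 1 - tanh(x)^2 on the stored output.
#[derive(Debug)]
pub struct Tanh{}
impl Activation for Tanh {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| hyptan(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
1.0 - outputs[index] * outputs[index]
}
}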
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
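// softmax outputs sum to 1, so this reduces to el * (1 - el), the diagonal of the softmax Jacobian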
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mattmazur() {
// <https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/>
let mut nn = Network::new(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(2,2,2),&vec!(
0.15, 0.25, 0.2, 0.3, 0.35, 0.35,
0.4, 0.5, 0.45, 0.55, 0.6, 0.6
));
let f1s = nn.forward(&vec!(0.05,0.1));
assert_eq!(f1s[0],vec!(0.5932699921071872, 0.596884378259767));
assert_eq!(f1s[1],vec!(0.7513650695523157, 0.7729284653214625));
assert_eq!(0.2983711087600027,error(&vec!(0.01,0.99), &f1s[1]));
// assert_eq!(0.08216704056423078, weight_error(0.01, 0.7513650695523157, f1s[0][0]));
assert_eq!(0.35891647971788465, update_weight(0.4, 0.08216704056423078, 0.5));
nn.backward(&vec!(0.05,0.1),f1s,&vec!(0.01,0.99), 0.5);
let f2s = nn.forward(&vec!(0.05,0.1));
//assert_eq!(0.29102777369359933,error(&vec!(0.01,0.99), &f2s[1]));
// we update the biases too
assert_eq!(0.28047144679143016,error(&vec!(0.01,0.99), &f2s[1]));
let last_error=nn.train(&vec!(0.05,0.1),&vec!(0.01,0.99),0.5,10000,0.0000000001);
assert!(last_error.1<0.00001);
assert!(last_error.0<=10000);
}
#[test]
fn test_becominghuman(){
// <https://becominghuman.ai/making-a-simple-neural-network-2ea1de81ec20>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(4,4,1));
let tests = vec!(
Test{input:vec!(1.0,0.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,0.0,1.0,0.0),target:vec!(1.0)},
Test{input:vec!(0.0,0.0,0.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,10000);
println!("last: {}",last_error);
assert!(last_error<0.001);
}
#[test]
fn test_towards(){
// <https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(3,4,1));
let tests = vec!(
Test{input:vec!(0.0,0.0,1.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,0.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,1.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,1500);
println!("last: {}",last_error);
assert!(last_error<0.05);
}
#[test]
fn test_softmax(){
assert_eq!(vec!(0.09003057317038046, 0.24472847105479764, 0.6652409557748218),softmax(&vec!(1.0,2.0,3.0)));
assert_eq!(vec!(0.0, 0.0, 1.0),softmax(&vec!(1000.0,2000.0,3000.0)));
}
}
| {0.0} | conditional_block |
nn.rs | //! Neural networks
use crate::matrix::*;
/// a network
#[derive(Debug)]
pub struct Network {
// activation functions
activations: Vec<Box<dyn Activation>>,
// topology
topology: Vec<usize>,
// weights
weights: Vec<Matrix>
}
impl Network {
/// create a new random network with the given topology and activation functions
pub fn new_random(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_random_weights(&topology);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// create a new network with the given topology and activation functions and initial weights
pub fn new(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>, weights: &[f64]) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_weights(&topology, weights);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// forward evaluation
pub fn forward(&self, inputs :&[f64]) -> Vec<Vec<f64>> {
assert_eq!(self.topology[0],inputs.len());
let mut m = Matrix::new(1,inputs.len(),inputs);
let mut all_results = Vec::with_capacity(self.topology.len() - 1);
self.weights.iter().enumerate().for_each(| (ix,wm) | {
add_column(&mut m,vec!(1.0));
m = mul(&m,wm);
//println!("after mul: {:?}",m);
let acts = self.activations[ix].activate(&get_data(&m));
m = Matrix::new(size(&m).0,size(&m).1,&acts);
//println!("after activation: {:?}",m);
all_results.push(acts);
});
assert_eq!(*self.topology.last().expect("empty topology!"),
all_results.last().expect("empty result!").len());
all_results
}
/// back propagation
pub fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1] | inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
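// rows beyond the previous layer's outputs are the bias weights; their input is the constant 1.0 column appended in forward()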
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target; returns (iterations used, final error)
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets; returns the error summed over all tests after training
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end = st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
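// powi(x, -1) is the reciprocal, so this evaluates 1/(1 + e^-i)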
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
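// shift by the maximum before exponentiating so that exp() cannot overflow for large inputs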
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: half the sum of squared errors (the 1/2 cancels when differentiating)
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivative for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else {0.0}
}
}
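// Illustrative sketch (not in the original module): one more Activation built on the
// hyptan helper above, using the identity d/dx tanh(x) = 1 - tanh(x)^2 on the stored output.
#[derive(Debug)]
pub struct Tanh{}
impl Activation for Tanh {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| hyptan(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
1.0 - outputs[index] * outputs[index]
}
}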
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
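// softmax outputs sum to 1, so this reduces to el * (1 - el), the diagonal of the softmax Jacobian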
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mattmazur() {
// <https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/>
let mut nn = Network::new(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(2,2,2),&vec!(
0.15, 0.25, 0.2, 0.3, 0.35, 0.35,
0.4, 0.5, 0.45, 0.55, 0.6, 0.6
));
let f1s = nn.forward(&vec!(0.05,0.1));
assert_eq!(f1s[0],vec!(0.5932699921071872, 0.596884378259767));
assert_eq!(f1s[1],vec!(0.7513650695523157, 0.7729284653214625));
assert_eq!(0.2983711087600027,error(&vec!(0.01,0.99), &f1s[1]));
// assert_eq!(0.08216704056423078, weight_error(0.01, 0.7513650695523157, f1s[0][0]));
assert_eq!(0.35891647971788465, update_weight(0.4, 0.08216704056423078, 0.5));
nn.backward(&vec!(0.05,0.1),f1s,&vec!(0.01,0.99), 0.5);
let f2s = nn.forward(&vec!(0.05,0.1));
//assert_eq!(0.29102777369359933,error(&vec!(0.01,0.99), &f2s[1]));
// we update the biases too
assert_eq!(0.28047144679143016,error(&vec!(0.01,0.99), &f2s[1]));
let last_error=nn.train(&vec!(0.05,0.1),&vec!(0.01,0.99),0.5,10000,0.0000000001);
assert!(last_error.1<0.00001);
assert!(last_error.0<=10000);
}
#[test]
fn test_becominghuman(){
// <https://becominghuman.ai/making-a-simple-neural-network-2ea1de81ec20>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(4,4,1));
let tests = vec!(
Test{input:vec!(1.0,0.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,0.0,1.0,0.0),target:vec!(1.0)},
Test{input:vec!(0.0,0.0,0.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,10000);
println!("last: {}",last_error);
assert!(last_error<0.001);
}
#[test]
fn test_towards(){
// <https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(3,4,1));
let tests = vec!(
Test{input:vec!(0.0,0.0,1.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,0.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,1.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,1500);
println!("last: {}",last_error);
assert!(last_error<0.05);
}
#[test]
fn test_softmax(){
assert_eq!(vec!(0.09003057317038046, 0.24472847105479764, 0.6652409557748218),softmax(&vec!(1.0,2.0,3.0)));
assert_eq!(vec!(0.0, 0.0, 1.0),softmax(&vec!(1000.0,2000.0,3000.0)));
}
} | } else { | random_line_split |
nn.rs | //! Neural networks
use crate::matrix::*;
/// a network
#[derive(Debug)]
pub struct Network {
// activation functions
activations: Vec<Box<dyn Activation>>,
// topology
topology: Vec<usize>,
// weights
weights: Vec<Matrix>
}
impl Network {
/// create a new random network with the given topology and activation functions
pub fn new_random(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_random_weights(&topology);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// create a new network with the given topology and activation functions and initial weights
pub fn new(activations: Vec<Box<dyn Activation>>, topology: Vec<usize>, weights: &[f64]) -> Network {
let l = topology.len();
assert!(l>1);
let rw = initial_weights(&topology, weights);
assert_eq!(rw.len(), activations.len());
Network {
activations,
topology,
weights: rw
}
}
/// forward evaluation
pub fn | (&self, inputs :&[f64]) -> Vec<Vec<f64>> {
assert_eq!(self.topology[0],inputs.len());
let mut m = Matrix::new(1,inputs.len(),inputs);
let mut all_results = Vec::with_capacity(self.topology.len() - 1);
self.weights.iter().enumerate().for_each(| (ix,wm) | {
add_column(&mut m,vec!(1.0));
m = mul(&m,wm);
//println!("after mul: {:?}",m);
let acts = self.activations[ix].activate(&get_data(&m));
m = Matrix::new(size(&m).0,size(&m).1,&acts);
//println!("after activation: {:?}",m);
all_results.push(acts);
});
assert_eq!(*self.topology.last().expect("empty topology!"),
all_results.last().expect("empty result!").len());
all_results
}
/// back propagation
pub fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1]
} else {
inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
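// rows beyond the previous layer's outputs are the bias weights; their input is the constant 1.0 column appended in forward()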
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target; returns (iterations used, final error)
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets; returns the error summed over all tests after training
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end = st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
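// powi(x, -1) is the reciprocal, so this evaluates 1/(1 + e^-i)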
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
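// shift by the maximum before exponentiating so that exp() cannot overflow for large inputs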
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: half the sum of squared errors (the 1/2 cancels when differentiating)
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivative for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else {0.0}
}
}
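// Illustrative sketch (not in the original module): one more Activation built on the
// hyptan helper above, using the identity d/dx tanh(x) = 1 - tanh(x)^2 on the stored output.
#[derive(Debug)]
pub struct Tanh{}
impl Activation for Tanh {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| hyptan(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
1.0 - outputs[index] * outputs[index]
}
}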
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
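// softmax outputs sum to 1, so this reduces to el * (1 - el), the diagonal of the softmax Jacobian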
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mattmazur() {
// <https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/>
let mut nn = Network::new(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(2,2,2),&vec!(
0.15, 0.25, 0.2, 0.3, 0.35, 0.35,
0.4, 0.5, 0.45, 0.55, 0.6, 0.6
));
let f1s = nn.forward(&vec!(0.05,0.1));
assert_eq!(f1s[0],vec!(0.5932699921071872, 0.596884378259767));
assert_eq!(f1s[1],vec!(0.7513650695523157, 0.7729284653214625));
assert_eq!(0.2983711087600027,error(&vec!(0.01,0.99), &f1s[1]));
// assert_eq!(0.08216704056423078, weight_error(0.01, 0.7513650695523157, f1s[0][0]));
assert_eq!(0.35891647971788465, update_weight(0.4, 0.08216704056423078, 0.5));
nn.backward(&vec!(0.05,0.1),f1s,&vec!(0.01,0.99), 0.5);
let f2s = nn.forward(&vec!(0.05,0.1));
//assert_eq!(0.29102777369359933,error(&vec!(0.01,0.99), &f2s[1]));
// we update the biases too
assert_eq!(0.28047144679143016,error(&vec!(0.01,0.99), &f2s[1]));
let last_error=nn.train(&vec!(0.05,0.1),&vec!(0.01,0.99),0.5,10000,0.0000000001);
assert!(last_error.1<0.00001);
assert!(last_error.0<=10000);
}
#[test]
fn test_becominghuman(){
// <https://becominghuman.ai/making-a-simple-neural-network-2ea1de81ec20>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(4,4,1));
let tests = vec!(
Test{input:vec!(1.0,0.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,0.0,0.0),target:vec!(0.0)},
Test{input:vec!(0.0,0.0,1.0,0.0),target:vec!(1.0)},
Test{input:vec!(0.0,0.0,0.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,10000);
println!("last: {}",last_error);
assert!(last_error<0.001);
}
#[test]
fn test_towards(){
// <https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6>
let mut nn = Network::new_random(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(3,4,1));
let tests = vec!(
Test{input:vec!(0.0,0.0,1.0),target:vec!(0.0)},
Test{input:vec!(0.0,1.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,0.0,1.0),target:vec!(1.0)},
Test{input:vec!(1.0,1.0,1.0),target:vec!(0.0)},
);
let last_error=nn.train_online(&tests,0.5,1500);
println!("last: {}",last_error);
assert!(last_error<0.05);
}
#[test]
fn test_softmax(){
assert_eq!(vec!(0.09003057317038046, 0.24472847105479764, 0.6652409557748218),softmax(&vec!(1.0,2.0,3.0)));
assert_eq!(vec!(0.0, 0.0, 1.0),softmax(&vec!(1000.0,2000.0,3000.0)));
}
}
| forward | identifier_name |
vm.rs | Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
// Copyright 2020 Solana Maintainers <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Virtual machine for eBPF programs.
use crate::{
ebpf,
elf::{Executable, FunctionRegistry, SBPFVersion},
error::EbpfError,
interpreter::Interpreter,
memory_region::MemoryMapping,
static_analysis::{Analysis, TraceLogEntry},
};
use std::{collections::BTreeMap, fmt::Debug, mem, sync::Arc};
/// Same as `Result` but provides a stable memory layout
#[derive(Debug)]
#[repr(C, u64)]
pub enum StableResult<T, E> {
/// Success
Ok(T),
/// Failure
Err(E),
}
impl<T: Debug, E: Debug> StableResult<T, E> {
/// `true` if `Ok`
pub fn is_ok(&self) -> bool {
match self {
Self::Ok(_) => true,
Self::Err(_) => false,
}
}
/// `true` if `Err`
pub fn is_err(&self) -> bool {
match self {
Self::Ok(_) => false,
Self::Err(_) => true,
}
}
/// Returns the inner value if `Ok`, panics otherwise
pub fn unwrap(self) -> T {
match self {
Self::Ok(value) => value,
Self::Err(error) => panic!("unwrap {:?}", error),
}
}
/// Returns the inner error if `Err`, panics otherwise
pub fn unwrap_err(self) -> E {
match self {
Self::Ok(value) => panic!("unwrap_err {:?}", value),
Self::Err(error) => error,
}
}
}
impl<T, E> From<StableResult<T, E>> for Result<T, E> {
fn from(result: StableResult<T, E>) -> Self {
match result {
StableResult::Ok(value) => Ok(value),
StableResult::Err(value) => Err(value),
}
}
}
impl<T, E> From<Result<T, E>> for StableResult<T, E> {
fn from(result: Result<T, E>) -> Self {
match result {
Ok(value) => Self::Ok(value),
Err(value) => Self::Err(value),
}
}
}
/// Return value of programs and syscalls
pub type ProgramResult = StableResult<u64, Box<dyn std::error::Error>>;
/// Syscall function without context
pub type BuiltinFunction<C> =
fn(&mut C, u64, u64, u64, u64, u64, &mut MemoryMapping, &mut ProgramResult);
/// Represents the interface to a fixed functionality program
#[derive(Eq)]
pub struct BuiltinProgram<C: ContextObject> {
/// Holds the Config if this is a loader program
config: Option<Box<Config>>,
/// Function pointers by symbol
functions: FunctionRegistry<BuiltinFunction<C>>,
}
impl<C: ContextObject> PartialEq for BuiltinProgram<C> {
fn eq(&self, other: &Self) -> bool {
self.config.eq(&other.config) && self.functions.eq(&other.functions)
}
}
impl<C: ContextObject> BuiltinProgram<C> {
/// Constructs a loader built-in program
pub fn new_loader(config: Config, functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: Some(Box::new(config)),
functions,
}
}
/// Constructs a built-in program
pub fn new_builtin(functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: None,
functions,
}
}
/// Constructs a mock loader built-in program
pub fn new_mock() -> Self {
Self {
config: Some(Box::default()),
functions: FunctionRegistry::default(),
}
}
/// Get the configuration settings assuming this is a loader program
pub fn get_config(&self) -> &Config {
self.config.as_ref().unwrap()
}
/// Get the function registry
pub fn get_function_registry(&self) -> &FunctionRegistry<BuiltinFunction<C>> {
&self.functions
}
/// Calculate memory size
pub fn mem_size(&self) -> usize {
mem::size_of::<Self>()
+ if self.config.is_some() {
mem::size_of::<Config>()
} else {
0
}
+ self.functions.mem_size()
}
}
impl<C: ContextObject> Debug for BuiltinProgram<C> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
writeln!(f, "{:?}", unsafe {
// `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug`
std::mem::transmute::<
&FunctionRegistry<BuiltinFunction<C>>,
&FunctionRegistry<BuiltinFunction<*const ()>>,
>(&self.functions)
})?;
Ok(())
}
}
/// VM configuration settings
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Config {
/// Maximum call depth
pub max_call_depth: usize,
/// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
pub stack_frame_size: usize,
/// Enables the use of MemoryMapping and MemoryRegion for address translation
pub enable_address_translation: bool,
/// Enables gaps in VM address space between the stack frames
pub enable_stack_frame_gaps: bool,
/// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
pub instruction_meter_checkpoint_distance: usize,
/// Enable instruction meter and limiting
pub enable_instruction_meter: bool,
/// Enable instruction tracing
pub enable_instruction_tracing: bool,
/// Enable dynamic string allocation for labels
pub enable_symbol_and_section_labels: bool,
/// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
pub reject_broken_elfs: bool,
/// Ratio of native host instructions per random no-op in JIT (0 = OFF)
pub noop_instruction_rate: u32,
/// Enable disinfection of immediate values and offsets provided by the user in JIT
pub sanitize_user_provided_values: bool,
/// Encrypt the runtime environment in JIT
pub encrypt_runtime_environment: bool,
/// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
pub external_internal_function_hash_collision: bool,
/// Have the verifier reject "callx r10"
pub reject_callx_r10: bool,
/// Avoid copying read only sections when possible
pub optimize_rodata: bool,
/// Use the new ELF parser
pub new_elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct | {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
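// usize::MAX acts as a virtual entry node until the first basic block is recorded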
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The callers frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")]
pub debug_port: Option<u16>,
}
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
/// If interpreted = `false` then the JIT compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port {
crate::debugger::execute(&mut interpreter, debug_port);
} else {
while interpreter.step() {}
}
#[cfg(not(feature = "debugger"))]
while interpreter.step() {}
interpreter.due_insn_count
} else {
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
{
let compiled_program = match executable
.get_compiled_program()
.ok_or_else(|| Box::new(EbpfError::JitNotCompiled))
{
Ok(compiled_program) => compiled_program,
Err(error) => return (0, ProgramResult::Err(error)),
};
let instruction_meter_final =
compiled_program.invoke(config, self, registers).max(0) as u64;
self.context_object_pointer
.get_remaining()
.saturating_sub(instruction_meter_final)
}
#[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
{
return (0, ProgramResult::Err(Box::new(EbpfError::JitNotCompiled)));
}
};
let instruction_count = if config.enable_instruction_meter {
self.context_object_pointer.consume(due_insn_count);
initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
} else {
0
};
let mut result = ProgramResult::Ok(0);
std::mem::swap(&mut result, &mut self.program_result);
(instruction_count, result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::syscalls;
#[test]
fn test_program_result_is_stable() {
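// #[repr(C, u64)] puts the discriminant in the first 8 bytes: 0 for Ok, 1 for Err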
let ok = ProgramResult::Ok(42);
assert_eq!(unsafe { *(&ok as *const _ as *const u64) }, 0);
let err = ProgramResult::Err(Box::new(EbpfError::JitNotCompiled));
assert_eq!(unsafe { *(&err as *const _ as *const u64) }, 1);
}
#[test]
fn test_builtin_program_eq() {
let mut function_registry_a =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_a
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
function_registry_a
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
let mut function_registry_b =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_b
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
function_registry_b
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
let mut function_registry_c =
FunctionRegistry::<BuiltinFunction<TestContextObject>>:: | TestContextObject | identifier_name |
vm.rs | Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
// Copyright 2020 Solana Maintainers <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Virtual machine for eBPF programs.
use crate::{
ebpf,
elf::{Executable, FunctionRegistry, SBPFVersion},
error::EbpfError,
interpreter::Interpreter,
memory_region::MemoryMapping,
static_analysis::{Analysis, TraceLogEntry},
};
use std::{collections::BTreeMap, fmt::Debug, mem, sync::Arc};
/// Same as `Result` but provides a stable memory layout
#[derive(Debug)]
#[repr(C, u64)]
pub enum StableResult<T, E> {
/// Success
Ok(T),
/// Failure
Err(E),
}
impl<T: Debug, E: Debug> StableResult<T, E> {
/// `true` if `Ok`
pub fn is_ok(&self) -> bool {
match self {
Self::Ok(_) => true,
Self::Err(_) => false,
}
}
/// `true` if `Err`
pub fn is_err(&self) -> bool {
match self {
Self::Ok(_) => false,
Self::Err(_) => true,
}
}
/// Returns the inner value if `Ok`, panics otherwise
pub fn unwrap(self) -> T {
match self {
Self::Ok(value) => value,
Self::Err(error) => panic!("unwrap {:?}", error),
}
}
/// Returns the inner error if `Err`, panics otherwise
pub fn unwrap_err(self) -> E {
match self {
Self::Ok(value) => panic!("unwrap_err {:?}", value),
Self::Err(error) => error,
}
}
}
impl<T, E> From<StableResult<T, E>> for Result<T, E> {
fn from(result: StableResult<T, E>) -> Self {
match result {
StableResult::Ok(value) => Ok(value),
StableResult::Err(value) => Err(value),
}
}
}
impl<T, E> From<Result<T, E>> for StableResult<T, E> {
fn from(result: Result<T, E>) -> Self {
match result {
Ok(value) => Self::Ok(value),
Err(value) => Self::Err(value),
}
}
}
/// Return value of programs and syscalls
pub type ProgramResult = StableResult<u64, Box<dyn std::error::Error>>;
/// Syscall function without context
pub type BuiltinFunction<C> =
fn(&mut C, u64, u64, u64, u64, u64, &mut MemoryMapping, &mut ProgramResult);
/// Represents the interface to a fixed functionality program
#[derive(Eq)]
pub struct BuiltinProgram<C: ContextObject> {
/// Holds the Config if this is a loader program
config: Option<Box<Config>>,
/// Function pointers by symbol
functions: FunctionRegistry<BuiltinFunction<C>>,
}
impl<C: ContextObject> PartialEq for BuiltinProgram<C> {
fn eq(&self, other: &Self) -> bool {
self.config.eq(&other.config) && self.functions.eq(&other.functions)
}
}
impl<C: ContextObject> BuiltinProgram<C> {
/// Constructs a loader built-in program
pub fn new_loader(config: Config, functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: Some(Box::new(config)),
functions,
}
}
/// Constructs a built-in program
pub fn new_builtin(functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: None,
functions,
}
}
/// Constructs a mock loader built-in program
pub fn new_mock() -> Self {
Self {
config: Some(Box::default()),
functions: FunctionRegistry::default(),
}
}
/// Get the configuration settings assuming this is a loader program
pub fn get_config(&self) -> &Config {
self.config.as_ref().unwrap()
}
/// Get the function registry
pub fn get_function_registry(&self) -> &FunctionRegistry<BuiltinFunction<C>> {
&self.functions
}
/// Calculate memory size
pub fn mem_size(&self) -> usize {
mem::size_of::<Self>()
+ if self.config.is_some() {
mem::size_of::<Config>()
} else {
0
}
+ self.functions.mem_size()
}
}
impl<C: ContextObject> Debug for BuiltinProgram<C> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
writeln!(f, "{:?}", unsafe {
// `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug`
std::mem::transmute::<
&FunctionRegistry<BuiltinFunction<C>>,
&FunctionRegistry<BuiltinFunction<*const ()>>,
>(&self.functions)
})?;
Ok(())
}
}
/// VM configuration settings
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Config {
/// Maximum call depth
pub max_call_depth: usize,
/// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
pub stack_frame_size: usize,
/// Enables the use of MemoryMapping and MemoryRegion for address translation
pub enable_address_translation: bool,
/// Enables gaps in VM address space between the stack frames
pub enable_stack_frame_gaps: bool,
/// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
pub instruction_meter_checkpoint_distance: usize,
/// Enable instruction meter and limiting
pub enable_instruction_meter: bool,
/// Enable instruction tracing
pub enable_instruction_tracing: bool,
/// Enable dynamic string allocation for labels
pub enable_symbol_and_section_labels: bool,
/// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
pub reject_broken_elfs: bool,
/// Ratio of native host instructions per random no-op in JIT (0 = OFF)
pub noop_instruction_rate: u32,
/// Enable disinfection of immediate values and offsets provided by the user in JIT
pub sanitize_user_provided_values: bool,
/// Encrypt the runtime environment in JIT
pub encrypt_runtime_environment: bool,
/// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
pub external_internal_function_hash_collision: bool,
/// Have the verifier reject "callx r10"
pub reject_callx_r10: bool,
/// Avoid copying read only sections when possible
pub optimize_rodata: bool,
/// Use the new ELF parser
pub new_elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct TestContextObject {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
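// usize::MAX acts as a virtual entry node until the first basic block is recorded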
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The callers frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")]
pub debug_port: Option<u16>,
}
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
    /// If `interpreted` is `false` then the JIT-compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) | crate::debugger::execute(&mut interpreter, debug_port);
} else {
while interpreter.step() {}
}
#[cfg(not(feature = "debugger"))]
while interpreter.step() {}
interpreter.due_insn_count
} else {
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
{
let compiled_program = match executable
.get_compiled_program()
.ok_or_else(|| Box::new(EbpfError::JitNotCompiled))
{
Ok(compiled_program) => compiled_program,
Err(error) => return (0, ProgramResult::Err(error)),
};
let instruction_meter_final =
compiled_program.invoke(config, self, registers).max(0) as u64;
self.context_object_pointer
.get_remaining()
.saturating_sub(instruction_meter_final)
}
#[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
{
return (0, ProgramResult::Err(Box::new(EbpfError::JitNotCompiled)));
}
};
let instruction_count = if config.enable_instruction_meter {
self.context_object_pointer.consume(due_insn_count);
initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
} else {
0
};
let mut result = ProgramResult::Ok(0);
std::mem::swap(&mut result, &mut self.program_result);
(instruction_count, result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::syscalls;
#[test]
fn test_program_result_is_stable() {
let ok = ProgramResult::Ok(42);
assert_eq!(unsafe { *(&ok as *const _ as *const u64) }, 0);
let err = ProgramResult::Err(Box::new(EbpfError::JitNotCompiled));
assert_eq!(unsafe { *(&err as *const _ as *const u64) }, 1);
}
#[test]
fn test_builtin_program_eq() {
let mut function_registry_a =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_a
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
function_registry_a
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
let mut function_registry_b =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_b
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
function_registry_b
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
let mut function_registry_c =
FunctionRegistry::<BuiltinFunction<TestContextObject | {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port { | identifier_body |
vm.rs | (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
// Copyright 2020 Solana Maintainers <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Virtual machine for eBPF programs.
use crate::{
ebpf,
elf::{Executable, FunctionRegistry, SBPFVersion},
error::EbpfError,
interpreter::Interpreter,
memory_region::MemoryMapping,
static_analysis::{Analysis, TraceLogEntry},
};
use std::{collections::BTreeMap, fmt::Debug, mem, sync::Arc};
/// Same as `Result` but provides a stable memory layout
#[derive(Debug)]
#[repr(C, u64)]
pub enum StableResult<T, E> {
/// Success
Ok(T),
/// Failure
Err(E),
}
impl<T: Debug, E: Debug> StableResult<T, E> {
/// `true` if `Ok`
pub fn is_ok(&self) -> bool {
match self {
Self::Ok(_) => true,
Self::Err(_) => false,
}
}
/// `true` if `Err`
pub fn is_err(&self) -> bool {
match self {
Self::Ok(_) => false,
Self::Err(_) => true,
}
}
/// Returns the inner value if `Ok`, panics otherwise
pub fn unwrap(self) -> T {
match self {
Self::Ok(value) => value,
Self::Err(error) => panic!("unwrap {:?}", error),
}
}
/// Returns the inner error if `Err`, panics otherwise
pub fn unwrap_err(self) -> E {
match self {
Self::Ok(value) => panic!("unwrap_err {:?}", value),
Self::Err(error) => error,
}
}
}
impl<T, E> From<StableResult<T, E>> for Result<T, E> {
fn from(result: StableResult<T, E>) -> Self {
match result {
StableResult::Ok(value) => Ok(value),
StableResult::Err(value) => Err(value),
}
}
}
impl<T, E> From<Result<T, E>> for StableResult<T, E> {
fn from(result: Result<T, E>) -> Self {
match result {
Ok(value) => Self::Ok(value),
Err(value) => Self::Err(value),
}
}
}
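// Illustrative sketch, not part of the original source: the two From impls
// above make StableResult and the standard Result interchangeable, while
// StableResult keeps the stable #[repr(C, u64)] layout across FFI.
fn _stable_result_conversion_sketch() {
    let stable: StableResult<u64, ()> = StableResult::Ok(42);
    let standard: Result<u64, ()> = stable.into();
    let round_tripped: StableResult<u64, ()> = standard.into();
    assert!(round_tripped.is_ok());
}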
/// Return value of programs and syscalls
pub type ProgramResult = StableResult<u64, Box<dyn std::error::Error>>;
/// Syscall function without context
pub type BuiltinFunction<C> =
fn(&mut C, u64, u64, u64, u64, u64, &mut MemoryMapping, &mut ProgramResult);
/// Represents the interface to a fixed functionality program
#[derive(Eq)]
pub struct BuiltinProgram<C: ContextObject> {
/// Holds the Config if this is a loader program
config: Option<Box<Config>>,
/// Function pointers by symbol
functions: FunctionRegistry<BuiltinFunction<C>>,
}
impl<C: ContextObject> PartialEq for BuiltinProgram<C> {
fn eq(&self, other: &Self) -> bool {
self.config.eq(&other.config) && self.functions.eq(&other.functions)
}
}
impl<C: ContextObject> BuiltinProgram<C> {
/// Constructs a loader built-in program
pub fn new_loader(config: Config, functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: Some(Box::new(config)),
functions,
}
}
/// Constructs a built-in program
pub fn new_builtin(functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: None,
functions,
}
}
/// Constructs a mock loader built-in program
pub fn new_mock() -> Self {
Self {
config: Some(Box::default()),
functions: FunctionRegistry::default(),
}
}
/// Get the configuration settings assuming this is a loader program
pub fn get_config(&self) -> &Config {
self.config.as_ref().unwrap()
}
/// Get the function registry
pub fn get_function_registry(&self) -> &FunctionRegistry<BuiltinFunction<C>> {
&self.functions
}
/// Calculate memory size
pub fn mem_size(&self) -> usize {
mem::size_of::<Self>()
+ if self.config.is_some() {
mem::size_of::<Config>()
} else {
0
}
+ self.functions.mem_size()
}
}
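// Illustrative sketch, not part of the original source: a loader differs
// from a plain builtin only in that it carries a Config; calling
// get_config() on a program constructed with new_builtin() would panic.
fn _builtin_program_sketch() {
    let loader: BuiltinProgram<TestContextObject> =
        BuiltinProgram::new_loader(Config::default(), FunctionRegistry::default());
    let builtin: BuiltinProgram<TestContextObject> =
        BuiltinProgram::new_builtin(FunctionRegistry::default());
    assert_eq!(loader.get_config().max_call_depth, 20);
    let _ = builtin.get_function_registry();
}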
impl<C: ContextObject> Debug for BuiltinProgram<C> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
writeln!(f, "{:?}", unsafe {
// `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug`
std::mem::transmute::<
&FunctionRegistry<BuiltinFunction<C>>,
&FunctionRegistry<BuiltinFunction<*const ()>>,
>(&self.functions)
})?;
Ok(())
}
}
/// VM configuration settings
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Config {
/// Maximum call depth
pub max_call_depth: usize,
/// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
pub stack_frame_size: usize,
/// Enables the use of MemoryMapping and MemoryRegion for address translation
pub enable_address_translation: bool,
/// Enables gaps in VM address space between the stack frames
pub enable_stack_frame_gaps: bool,
/// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
pub instruction_meter_checkpoint_distance: usize,
/// Enable instruction meter and limiting
pub enable_instruction_meter: bool,
/// Enable instruction tracing
pub enable_instruction_tracing: bool,
/// Enable dynamic string allocation for labels
pub enable_symbol_and_section_labels: bool,
/// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
pub reject_broken_elfs: bool,
/// Ratio of native host instructions per random no-op in JIT (0 = OFF)
pub noop_instruction_rate: u32,
/// Enable disinfection of immediate values and offsets provided by the user in JIT
pub sanitize_user_provided_values: bool,
/// Encrypt the runtime environment in JIT
pub encrypt_runtime_environment: bool,
/// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
pub external_internal_function_hash_collision: bool,
/// Have the verifier reject "callx r10"
pub reject_callx_r10: bool,
/// Avoid copying read only sections when possible
pub optimize_rodata: bool,
/// Use the new ELF parser
pub new_elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
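// Illustrative sketch, not part of the original source: with the defaults
// above, the stack region is 4_096 bytes per frame times 20 frames.
fn _default_stack_size_sketch() {
    assert_eq!(Config::default().stack_size(), 4_096 * 20);
}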
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct TestContextObject {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
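// Illustrative sketch, not part of the original source: TestContextObject
// behaves like a simple fuel meter whose consume() saturates at zero.
fn _instruction_meter_sketch() {
    let mut meter = TestContextObject::new(10);
    meter.consume(3);
    assert_eq!(meter.get_remaining(), 7);
    meter.consume(100);
    assert_eq!(meter.get_remaining(), 0);
}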
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
    /// The caller-saved registers
    pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
    /// The caller's frame pointer
    pub frame_pointer: u64,
    /// The target_pc of the exit instruction which returns to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")] |
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
        if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
    /// If `interpreted` is `false` then the JIT-compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port {
crate::debugger::execute(&mut interpreter, debug_port);
} else {
while interpreter.step() {}
}
#[cfg(not(feature = "debugger"))]
while interpreter.step() {}
interpreter.due_insn_count
} else {
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
{
let compiled_program = match executable
.get_compiled_program()
.ok_or_else(|| Box::new(EbpfError::JitNotCompiled))
{
Ok(compiled_program) => compiled_program,
Err(error) => return (0, ProgramResult::Err(error)),
};
let instruction_meter_final =
compiled_program.invoke(config, self, registers).max(0) as u64;
self.context_object_pointer
.get_remaining()
.saturating_sub(instruction_meter_final)
}
#[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
{
return (0, ProgramResult::Err(Box::new(EbpfError::JitNotCompiled)));
}
};
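        // Settle the meter: charge the instructions that came due during this
        // run, then report the executed count as the drop in the remaining
        // allowance since entry.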
let instruction_count = if config.enable_instruction_meter {
self.context_object_pointer.consume(due_insn_count);
initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
} else {
0
};
let mut result = ProgramResult::Ok(0);
std::mem::swap(&mut result, &mut self.program_result);
(instruction_count, result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::syscalls;
#[test]
fn test_program_result_is_stable() {
let ok = ProgramResult::Ok(42);
assert_eq!(unsafe { *(&ok as *const _ as *const u64) }, 0);
let err = ProgramResult::Err(Box::new(EbpfError::JitNotCompiled));
assert_eq!(unsafe { *(&err as *const _ as *const u64) }, 1);
}
#[test]
fn test_builtin_program_eq() {
let mut function_registry_a =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_a
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
function_registry_a
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
let mut function_registry_b =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_b
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
function_registry_b
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
let mut function_registry_c =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default | pub debug_port: Option<u16>,
} | random_line_split |
vm.rs | Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
// Copyright 2020 Solana Maintainers <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Virtual machine for eBPF programs.
use crate::{
ebpf,
elf::{Executable, FunctionRegistry, SBPFVersion},
error::EbpfError,
interpreter::Interpreter,
memory_region::MemoryMapping,
static_analysis::{Analysis, TraceLogEntry},
};
use std::{collections::BTreeMap, fmt::Debug, mem, sync::Arc};
/// Same as `Result` but provides a stable memory layout
#[derive(Debug)]
#[repr(C, u64)]
pub enum StableResult<T, E> {
/// Success
Ok(T),
/// Failure
Err(E),
}
impl<T: Debug, E: Debug> StableResult<T, E> {
/// `true` if `Ok`
pub fn is_ok(&self) -> bool {
match self {
Self::Ok(_) => true,
Self::Err(_) => false,
}
}
/// `true` if `Err`
pub fn is_err(&self) -> bool {
match self {
Self::Ok(_) => false,
Self::Err(_) => true,
}
}
/// Returns the inner value if `Ok`, panics otherwise
pub fn unwrap(self) -> T {
match self {
Self::Ok(value) => value,
Self::Err(error) => panic!("unwrap {:?}", error),
}
}
/// Returns the inner error if `Err`, panics otherwise
pub fn unwrap_err(self) -> E {
match self {
Self::Ok(value) => panic!("unwrap_err {:?}", value),
Self::Err(error) => error,
}
}
}
impl<T, E> From<StableResult<T, E>> for Result<T, E> {
fn from(result: StableResult<T, E>) -> Self {
match result {
StableResult::Ok(value) => Ok(value),
StableResult::Err(value) => Err(value),
}
}
}
impl<T, E> From<Result<T, E>> for StableResult<T, E> {
fn from(result: Result<T, E>) -> Self {
match result {
Ok(value) => Self::Ok(value),
Err(value) => Self::Err(value),
}
}
}
/// Return value of programs and syscalls
pub type ProgramResult = StableResult<u64, Box<dyn std::error::Error>>;
/// Syscall function without context
pub type BuiltinFunction<C> =
fn(&mut C, u64, u64, u64, u64, u64, &mut MemoryMapping, &mut ProgramResult);
/// Represents the interface to a fixed functionality program
#[derive(Eq)]
pub struct BuiltinProgram<C: ContextObject> {
/// Holds the Config if this is a loader program
config: Option<Box<Config>>,
/// Function pointers by symbol
functions: FunctionRegistry<BuiltinFunction<C>>,
}
impl<C: ContextObject> PartialEq for BuiltinProgram<C> {
fn eq(&self, other: &Self) -> bool {
self.config.eq(&other.config) && self.functions.eq(&other.functions)
}
}
impl<C: ContextObject> BuiltinProgram<C> {
/// Constructs a loader built-in program
pub fn new_loader(config: Config, functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: Some(Box::new(config)),
functions,
}
}
/// Constructs a built-in program
pub fn new_builtin(functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: None,
functions,
}
}
/// Constructs a mock loader built-in program
pub fn new_mock() -> Self {
Self {
config: Some(Box::default()),
functions: FunctionRegistry::default(),
}
}
/// Get the configuration settings assuming this is a loader program
pub fn get_config(&self) -> &Config {
self.config.as_ref().unwrap()
}
/// Get the function registry
pub fn get_function_registry(&self) -> &FunctionRegistry<BuiltinFunction<C>> {
&self.functions
}
/// Calculate memory size
pub fn mem_size(&self) -> usize {
mem::size_of::<Self>()
+ if self.config.is_some() {
mem::size_of::<Config>()
} else {
0
}
+ self.functions.mem_size()
}
}
impl<C: ContextObject> Debug for BuiltinProgram<C> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
writeln!(f, "{:?}", unsafe {
// `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug`
std::mem::transmute::<
&FunctionRegistry<BuiltinFunction<C>>,
&FunctionRegistry<BuiltinFunction<*const ()>>,
>(&self.functions)
})?;
Ok(())
}
}
/// VM configuration settings
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Config {
/// Maximum call depth
pub max_call_depth: usize,
/// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
pub stack_frame_size: usize,
/// Enables the use of MemoryMapping and MemoryRegion for address translation
pub enable_address_translation: bool,
/// Enables gaps in VM address space between the stack frames
pub enable_stack_frame_gaps: bool,
/// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
pub instruction_meter_checkpoint_distance: usize,
/// Enable instruction meter and limiting
pub enable_instruction_meter: bool,
/// Enable instruction tracing
pub enable_instruction_tracing: bool,
/// Enable dynamic string allocation for labels
pub enable_symbol_and_section_labels: bool,
/// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
pub reject_broken_elfs: bool,
/// Ratio of native host instructions per random no-op in JIT (0 = OFF)
pub noop_instruction_rate: u32,
/// Enable disinfection of immediate values and offsets provided by the user in JIT
pub sanitize_user_provided_values: bool,
/// Encrypt the runtime environment in JIT
pub encrypt_runtime_environment: bool,
/// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
pub external_internal_function_hash_collision: bool,
/// Have the verifier reject "callx r10"
pub reject_callx_r10: bool,
/// Avoid copying read only sections when possible
pub optimize_rodata: bool,
/// Use the new ELF parser
pub new_elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct TestContextObject {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
    /// The caller-saved registers
    pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
    /// The caller's frame pointer
    pub frame_pointer: u64,
    /// The target_pc of the exit instruction which returns to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")]
pub debug_port: Option<u16>,
}
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
        if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
    /// If `interpreted` is `false` then the JIT-compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port {
crate::debugger::execute(&mut interpreter, debug_port);
} else {
while interpreter.step() {}
}
#[cfg(not(feature = "debugger"))]
while interpreter.step() {}
interpreter.due_insn_count
} else | }
;
let instruction_count = if config.enable_instruction_meter {
self.context_object_pointer.consume(due_insn_count);
initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
} else {
0
};
let mut result = ProgramResult::Ok(0);
std::mem::swap(&mut result, &mut self.program_result);
(instruction_count, result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::syscalls;
#[test]
fn test_program_result_is_stable() {
let ok = ProgramResult::Ok(42);
assert_eq!(unsafe { *(&ok as *const _ as *const u64) }, 0);
let err = ProgramResult::Err(Box::new(EbpfError::JitNotCompiled));
assert_eq!(unsafe { *(&err as *const _ as *const u64) }, 1);
}
#[test]
fn test_builtin_program_eq() {
let mut function_registry_a =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_a
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
function_registry_a
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
let mut function_registry_b =
FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
function_registry_b
.register_function_hashed(*b"log_64", syscalls::bpf_syscall_u64)
.unwrap();
function_registry_b
.register_function_hashed(*b"log", syscalls::bpf_syscall_string)
.unwrap();
let mut function_registry_c =
FunctionRegistry::<BuiltinFunction<TestContext | {
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
{
let compiled_program = match executable
.get_compiled_program()
.ok_or_else(|| Box::new(EbpfError::JitNotCompiled))
{
Ok(compiled_program) => compiled_program,
Err(error) => return (0, ProgramResult::Err(error)),
};
let instruction_meter_final =
compiled_program.invoke(config, self, registers).max(0) as u64;
self.context_object_pointer
.get_remaining()
.saturating_sub(instruction_meter_final)
}
#[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
{
return (0, ProgramResult::Err(Box::new(EbpfError::JitNotCompiled)));
} | conditional_block |
ed25519.rs | :
// - Isis Agora Lovecruft <[email protected]>
//! A Rust implementation of ed25519 EdDSA key generation, signing, and
//! verification.
use core::fmt::Debug;
#[cfg(feature = "std")]
use rand::Rng;
use digest::BlockInput;
use digest::Digest;
use digest::Input;
use digest::FixedOutput;
use generic_array::typenum::U64;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::CompressedEdwardsY;
use curve25519_dalek::edwards::ExtendedPoint;
use curve25519_dalek::scalar::Scalar;
use subtle::slices_equal;
/// The length of an ed25519 EdDSA `Signature`, in bytes.
pub const SIGNATURE_LENGTH: usize = 64;
/// The length of an ed25519 EdDSA `SecretKey`, in bytes.
pub const SECRET_KEY_LENGTH: usize = 32;
/// The length of an ed25519 EdDSA `PublicKey`, in bytes.
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// An EdDSA signature.
///
/// # Note
///
/// These signatures, unlike the ed25519 signature reference implementation, are
/// "detached"—that is, they do **not** include a copy of the message which has
/// been signed.
#[derive(Copy)]
#[repr(C)]
pub struct Signature(pub [u8; SIGNATURE_LENGTH]);
impl Clone for Signature {
fn clone(&self) -> Self { *self }
}
impl Debug for Signature {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Signature([{:?}])", &self.0[..])
}
}
impl Eq for Signature {}
impl PartialEq for Signature {
fn eq(&self, other: &Signature) -> bool {
let mut equal: u8 = 0;
for i in 0..64 {
equal |= self.0[i] ^ other.0[i];
}
        equal == 0
}
}
impl Signature {
/// View this `Signature` as a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] {
self.0
}
/// View this `Signature` as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SIGNATURE_LENGTH] {
&self.0
}
/// Construct a `Signature` from a slice of bytes.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> Signature {
Signature(*array_ref!(bytes, 0, SIGNATURE_LENGTH))
}
}
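// Illustrative sketch, not part of the original source: a Signature
// survives a to_bytes()/from_bytes() round trip, which is the intended
// serialization path.
fn _signature_roundtrip_sketch(sig: &Signature) {
    let bytes = sig.to_bytes();
    assert!(Signature::from_bytes(&bytes) == *sig);
}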
/// An EdDSA secret key.
#[repr(C)]
pub struct SecretKey(pub [u8; SECRET_KEY_LENGTH]);
impl Debug for SecretKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "SecretKey: {:?}", &self.0[..])
}
}
impl SecretKey {
/// Convert this secret key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SECRET_KEY_LENGTH] {
self.0
}
/// View this secret key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SECRET_KEY_LENGTH] {
&self.0
}
/// Construct a `SecretKey` from a slice of bytes.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::SECRET_KEY_LENGTH;
///
/// let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = [
/// 157, 097, 177, 157, 239, 253, 090, 096,
/// 186, 132, 074, 244, 146, 236, 044, 196,
/// 068, 073, 197, 105, 123, 050, 105, 025,
/// 112, 059, 172, 003, 028, 174, 127, 096, ];
///
/// let secret_key: SecretKey = SecretKey::from_bytes(&secret_key_bytes[..]);
/// # }
/// ```
///
/// # Returns
///
/// An EdDSA `SecretKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
    /// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
    /// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
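        // Clamp per the ed25519 spec: clear the low three bits of the first
        // byte (multiple of the cofactor), clear the top bit of the last
        // byte, and set the second-highest bit, fixing the scalar's length.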
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
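        // Reject signatures whose s component has any of its top three bits
        // set; such an s is not a canonically reduced scalar.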
        if signature.0[63] & 224 != 0 {
return false;
}
ao = self.decompress();
if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing the corresponding secret key.
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Keypair`.
pub fn from_bytes<'a>(public: &'a [u8; 32], secret: &'a [u8; 32]) -> Keypair {
Keypair{ public: PublicKey::from_bytes(public),
secret: SecretKey::from_bytes(secret), }
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let mut cspring: OsRng = OsRng::new().unwrap();
/// let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
///
/// # }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
#[cfg(feature = "std")]
pub fn generate<D>(csprng: &mut Rng) -> Keypair
where D: Digest<OutputSize = U64> + Default {
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = PublicKey::from_secret::<D>(&sk);
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign<D>(&self, message: &[u8]) -> Signature
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut signature_bytes: [u8; 64] = [0u8; SIGNATURE_LENGTH];
let mut expanded_key_secret: Scalar;
let mesg_digest: Scalar;
let hram_digest: Scalar;
let r: ExtendedPoint;
let s: Scalar;
let t: CompressedEdwardsY;
let secret_key: &[u8; 32] = self.secret.as_bytes();
let public_key: &[u8; 32] = self.public.as_bytes();
h.input(secret_key);
hash.copy_from_slice(h.fixed_result().as_slice());
expanded_key_secret = Scalar(*array_ref!(&hash, 0, 32));
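        // Same clamping as in key generation: the low three bits are cleared
        // and the top two bits of the last byte are forced to 01.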
expanded_key_secret[0] &= 248;
expanded_key_secret[31] &= 63;
expanded_key_secret[31] |= 64;
h = D::default();
h.input(&hash[32..]);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
mesg_digest = Scalar::reduce(&hash);
r = &mesg_digest * &constants::ED25519_BASEPOINT_TABLE;
h = D::default();
h.input(&r.compress().to_bytes()[..]);
h.input(public_key);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
hram_digest = Scalar::reduce(&hash);
s = Scalar::multiply_add(&hram_digest, &expanded_key_secret, &mesg_digest);
t = r.compress();
signature_bytes[..32].copy_from_slice(&t.0);
signature_bytes[32..64].copy_from_slice(&s.0);
Signature(*array_ref!(&signature_bytes, 0, 64))
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: FixedOutput<OutputSize = U64> + BlockInput + Default + Input {
self.public.verify::<D>(message, signature)
}
}
#[cfg(test)]
mod test {
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::string::String;
use std::vec::Vec;
use curve25519_dalek::edwards::ExtendedPoint;
use rand::OsRng;
use hex::FromHex;
use sha2::Sha512;
use super::*;
#[test]
fn unmarshal_marshal() { // TestUnmarshalMarshal
let mut cspring: OsRng;
let mut keypair: Keypair;
let mut x: Option<ExtendedPoint>;
let a: ExtendedPoint;
let public: PublicKey;
cspring = OsRng::new().unwrap();
// from_bytes() fails if vx²-u=0 and vx²+u=0
loop {
keypair = Keypair::generate::<Sha512>(&mut cspring);
x = keypair.public.decompress();
if x.is_some() {
a = x.unwrap();
break;
}
}
public = PublicKey(a.compress());
assert!(keypair.public.0 == public.0);
}
#[test]
fn sign_ver | TestSignVerify
let mut cspring: OsRng;
let keypair: Keypair;
let good_sig: Signature;
let bad_sig: Signature;
let good: &[u8] = "test message".as_bytes();
let bad: &[u8] = "wrong message".as_bytes();
cspring = OsRng::new().unwrap();
keypair = Keypair::generate::<Sha512>(&mut cspring);
good_sig = keypair.sign::<Sha512>(&good);
bad_sig = keypair.sign::<Sha512>(&bad);
assert!(keypair.verify::<Sha512>(&good, &good_sig) == true,
"Verification of a valid signature failed!");
assert!(keypair.verify::<Sha512>(&good, &bad_sig) == false,
"Verification of a signature on a different message passed!");
assert!(keypair.verify::<Sha512>(&bad, &good_sig) == false,
"Verification of a signature on a different message passed!");
}
// TESTVECTORS is taken from sign.input.gz in agl's ed25519 Golang
// package. It is a selection of test cases from
// http://ed25519.cr.yp.to/python/sign.input
#[cfg(test)]
#[cfg(not(release))]
#[test]
fn golden() { // TestGolden
let mut line: String;
let mut lineno: usize = 0;
let f = File::open("TESTVECTORS");
if f.is_err() {
println!("This test is only available when the code has been cloned \
from the git repository, since the TESTVECTORS file is large \
and is therefore not included within the distributed crate.");
panic!();
}
let file = BufReader::new(f.unwrap());
for l in file.lines() {
lineno += 1;
line = l.unwrap();
let parts: Vec<&str> = line.split(':').collect();
assert_eq!(parts.len(), 5, "wrong number of fields in line {}", lineno);
            let sec_bytes: Vec<u8> = FromHex::from_hex(&parts[0]).unwrap();
let pub_bytes: Vec<u8> = FromHex::from_hex(&parts[1]).unwrap();
let message: Vec<u8> = FromHex::from_hex(&parts[2]).unwrap();
let sig_bytes: Vec<u8> = FromHex::from_hex(&parts[3]).unwrap();
// The signatures in the test vectors also include the message
// at the end, but we just want R and S.
let sig1: Signature = Signature::from_bytes(sig_bytes.as_ref());
let keypair: Keypair = Keypair::from_bytes(
array_ref!(*pub_bytes, 0, PUBLIC_KEY_LENGTH),
array_ref!(*sec_bytes, 0, SECRET_KEY_LENGTH));
let sig2: Signature = keypair.sign::<Sha512>(&message);
assert!(sig1 == sig2, "Signature bytes not equal on line {}", lineno);
assert!(keypair.verify::<Sha512>(&message, &sig2),
"Signature verification failed on line {}", lineno);
}
}
}
#[cfg(all(test, feature = "bench"))]
mod bench {
use test::Bencher;
use rand::OsRng;
use sha2::Sha512;
use super::*;
/// A fake RNG which simply returns zeroes.
struct ZeroRng;
impl ZeroRng {
pub fn new() -> ZeroRng {
ZeroRng
}
}
impl Rng for ZeroRng {
fn next_u32(&mut self) -> u32 { 0u32 }
fn fill_bytes(&mut self, bytes: &mut [u8]) {
            for i in 0..bytes.len() {
bytes[i] = 0;
}
}
}
#[bench]
fn sign(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
        b.iter(|| keypair.sign::<Sha512>(msg));
}
#[bench]
fn verify(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
let sig: Signature = keypair.sign::<Sha512>(msg);
        b.iter(|| keypair.verify::<Sha512>(msg, &sig));
}
#[bench]
fn key_generation(b: &mut Bencher) {
let mut rng: ZeroRng = ZeroRng::new();
        b.iter(|| Keypair::generate::<Sha512>(&mut rng));
}
#[bench]
fn underlying_scalar_mult_basepoint(b: &mut Bencher) {
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
let scalar: Scalar = Scalar([ 20, 130, 129, 196, 247, 182, 211, 102,
11, 168, 169, 131, 159, 69, 126, 35,
| ify() { // | identifier_name |
ed25519.rs | Authors:
// - Isis Agora Lovecruft <[email protected]>
//! A Rust implementation of ed25519 EdDSA key generation, signing, and
//! verification.
use core::fmt::Debug;
#[cfg(feature = "std")]
use rand::Rng;
use digest::BlockInput;
use digest::Digest;
use digest::Input;
use digest::FixedOutput;
use generic_array::typenum::U64;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::CompressedEdwardsY;
use curve25519_dalek::edwards::ExtendedPoint;
use curve25519_dalek::scalar::Scalar;
use subtle::slices_equal;
/// The length of an ed25519 EdDSA `Signature`, in bytes.
pub const SIGNATURE_LENGTH: usize = 64;
/// The length of an ed25519 EdDSA `SecretKey`, in bytes.
pub const SECRET_KEY_LENGTH: usize = 32;
/// The length of an ed25519 EdDSA `PublicKey`, in bytes.
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// An EdDSA signature.
///
/// # Note
///
/// These signatures, unlike the ed25519 signature reference implementation, are
/// "detached"—that is, they do **not** include a copy of the message which has
/// been signed.
#[derive(Copy)]
#[repr(C)]
pub struct Signature(pub [u8; SIGNATURE_LENGTH]);
impl Clone for Signature {
fn clone(&self) -> Self { *self }
}
impl Debug for Signature {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Signature([{:?}])", &self.0[..])
}
}
impl Eq for Signature {}
impl PartialEq for Signature {
fn eq(&self, other: &Signature) -> bool {
let mut equal: u8 = 0;
for i in 0..64 {
equal |= self.0[i] ^ other.0[i];
}
        equal == 0
}
}
impl Signature {
/// View this `Signature` as a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] {
self.0
}
/// View this `Signature` as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SIGNATURE_LENGTH] {
&self.0
}
/// Construct a `Signature` from a slice of bytes.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> Signature {
Signature(*array_ref!(bytes, 0, SIGNATURE_LENGTH))
}
}
/// An EdDSA secret key.
#[repr(C)]
pub struct SecretKey(pub [u8; SECRET_KEY_LENGTH]);
impl Debug for SecretKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "SecretKey: {:?}", &self.0[..])
}
}
impl SecretKey {
/// Convert this secret key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SECRET_KEY_LENGTH] {
self.0
}
/// View this secret key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SECRET_KEY_LENGTH] {
&self.0
}
/// Construct a `SecretKey` from a slice of bytes.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::SECRET_KEY_LENGTH;
///
/// let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = [
/// 157, 097, 177, 157, 239, 253, 090, 096,
/// 186, 132, 074, 244, 146, 236, 044, 196,
/// 068, 073, 197, 105, 123, 050, 105, 025,
/// 112, 059, 172, 003, 028, 174, 127, 096, ];
///
/// let secret_key: SecretKey = SecretKey::from_bytes(&secret_key_bytes[..]);
/// # }
/// ```
///
/// # Returns
///
/// An EdDSA `SecretKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 { | if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
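// --- Added sketch: gating acceptance of a message on verification, using only
// the `PublicKey` half. Names are illustrative; `D` is the same 512-bit digest
// that was used at signing time. ---
#[allow(dead_code)]
fn accept_message_sketch<D>(sender: &PublicKey, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
// `verify` returns false for a public key that fails to decompress, for a
// signature with the high bits set, and for any digest mismatch.
sender.verify::<D>(message, signature)
}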
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing the corresponding secret key.
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Keypair`.
pub fn from_bytes<'a>(public: &'a [u8; 32], secret: &'a [u8; 32]) -> Keypair {
Keypair{ public: PublicKey::from_bytes(public),
secret: SecretKey::from_bytes(secret), }
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let mut cspring: OsRng = OsRng::new().unwrap();
/// let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
///
/// # }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
#[cfg(feature = "std")]
pub fn generate<D>(csprng: &mut Rng) -> Keypair
where D: Digest<OutputSize = U64> + Default {
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = PublicKey::from_secret::<D>(&sk);
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign<D>(&self, message: &[u8]) -> Signature
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut signature_bytes: [u8; 64] = [0u8; SIGNATURE_LENGTH];
let mut expanded_key_secret: Scalar;
let mesg_digest: Scalar;
let hram_digest: Scalar;
let r: ExtendedPoint;
let s: Scalar;
let t: CompressedEdwardsY;
let secret_key: &[u8; 32] = self.secret.as_bytes();
let public_key: &[u8; 32] = self.public.as_bytes();
h.input(secret_key);
hash.copy_from_slice(h.fixed_result().as_slice());
expanded_key_secret = Scalar(*array_ref!(&hash, 0, 32));
expanded_key_secret[0] &= 248;
expanded_key_secret[31] &= 63;
expanded_key_secret[31] |= 64;
h = D::default();
h.input(&hash[32..]);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
mesg_digest = Scalar::reduce(&hash);
r = &mesg_digest * &constants::ED25519_BASEPOINT_TABLE;
h = D::default();
h.input(&r.compress().to_bytes()[..]);
h.input(public_key);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
hram_digest = Scalar::reduce(&hash);
s = Scalar::multiply_add(&hram_digest, &expanded_key_secret, &mesg_digest);
t = r.compress();
signature_bytes[..32].copy_from_slice(&t.0);
signature_bytes[32..64].copy_from_slice(&s.0);
Signature(*array_ref!(&signature_bytes, 0, 64))
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: FixedOutput<OutputSize = U64> + BlockInput + Default + Input {
self.public.verify::<D>(message, signature)
}
}
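// --- Added sketch: persisting a keypair as raw bytes and rebuilding it with
// `Keypair::from_bytes`. Illustrative only; note the warning above that no
// validation is performed on the stored bytes. ---
#[allow(dead_code)]
fn keypair_storage_sketch(keypair: &Keypair) {
let public_bytes: [u8; 32] = keypair.public.to_bytes();
let secret_bytes: [u8; 32] = keypair.secret.to_bytes();
let restored: Keypair = Keypair::from_bytes(&public_bytes, &secret_bytes);
assert!(restored.public.0 == keypair.public.0);
}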
#[cfg(test)]
mod test {
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::string::String;
use std::vec::Vec;
use curve25519_dalek::edwards::ExtendedPoint;
use rand::OsRng;
use hex::FromHex;
use sha2::Sha512;
use super::*;
#[test]
fn unmarshal_marshal() { // TestUnmarshalMarshal
let mut cspring: OsRng;
let mut keypair: Keypair;
let mut x: Option<ExtendedPoint>;
let a: ExtendedPoint;
let public: PublicKey;
cspring = OsRng::new().unwrap();
// from_bytes() fails if vx²-u=0 and vx²+u=0
loop {
keypair = Keypair::generate::<Sha512>(&mut cspring);
x = keypair.public.decompress();
if x.is_some() {
a = x.unwrap();
break;
}
}
public = PublicKey(a.compress());
assert!(keypair.public.0 == public.0);
}
#[test]
fn sign_verify() { // TestSignVerify
let mut cspring: OsRng;
let keypair: Keypair;
let good_sig: Signature;
let bad_sig: Signature;
let good: &[u8] = "test message".as_bytes();
let bad: &[u8] = "wrong message".as_bytes();
cspring = OsRng::new().unwrap();
keypair = Keypair::generate::<Sha512>(&mut cspring);
good_sig = keypair.sign::<Sha512>(&good);
bad_sig = keypair.sign::<Sha512>(&bad);
assert!(keypair.verify::<Sha512>(&good, &good_sig) == true,
"Verification of a valid signature failed!");
assert!(keypair.verify::<Sha512>(&good, &bad_sig) == false,
"Verification of a signature on a different message passed!");
assert!(keypair.verify::<Sha512>(&bad, &good_sig) == false,
"Verification of a signature on a different message passed!");
}
// TESTVECTORS is taken from sign.input.gz in agl's ed25519 Golang
// package. It is a selection of test cases from
// http://ed25519.cr.yp.to/python/sign.input
#[cfg(test)]
#[cfg(not(release))]
#[test]
fn golden() { // TestGolden
let mut line: String;
let mut lineno: usize = 0;
let f = File::open("TESTVECTORS");
if f.is_err() {
println!("This test is only available when the code has been cloned \
from the git repository, since the TESTVECTORS file is large \
and is therefore not included within the distributed crate.");
panic!();
}
let file = BufReader::new(f.unwrap());
for l in file.lines() {
lineno += 1;
line = l.unwrap();
let parts: Vec<&str> = line.split(':').collect();
assert_eq!(parts.len(), 5, "wrong number of fields in line {}", lineno);
let sec_bytes: Vec<u8> = FromHex::from_hex(&parts[0]).unwrap();
let pub_bytes: Vec<u8> = FromHex::from_hex(&parts[1]).unwrap();
let message: Vec<u8> = FromHex::from_hex(&parts[2]).unwrap();
let sig_bytes: Vec<u8> = FromHex::from_hex(&parts[3]).unwrap();
// The signatures in the test vectors also include the message
// at the end, but we just want R and S.
let sig1: Signature = Signature::from_bytes(sig_bytes.as_ref());
let keypair: Keypair = Keypair::from_bytes(
array_ref!(*pub_bytes, 0, PUBLIC_KEY_LENGTH),
array_ref!(*sec_bytes, 0, SECRET_KEY_LENGTH));
let sig2: Signature = keypair.sign::<Sha512>(&message);
assert!(sig1 == sig2, "Signature bytes not equal on line {}", lineno);
assert!(keypair.verify::<Sha512>(&message, &sig2),
"Signature verification failed on line {}", lineno);
}
}
}
#[cfg(all(test, feature = "bench"))]
mod bench {
use test::Bencher;
use rand::OsRng;
use sha2::Sha512;
use super::*;
/// A fake RNG which simply returns zeroes.
struct ZeroRng;
impl ZeroRng {
pub fn new() -> ZeroRng {
ZeroRng
}
}
impl Rng for ZeroRng {
fn next_u32(&mut self) -> u32 { 0u32 }
fn fill_bytes(&mut self, bytes: &mut [u8]) {
for i in 0..bytes.len() {
bytes[i] = 0;
}
}
}
#[bench]
fn sign(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
b.iter(|| keypair.sign::<Sha512>(msg));
}
#[bench]
fn verify(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
let sig: Signature = keypair.sign::<Sha512>(msg);
b.iter(|| keypair.verify::<Sha512>(msg, &sig));
}
#[bench]
fn key_generation(b: &mut Bencher) {
let mut rng: ZeroRng = ZeroRng::new();
b.iter(|| Keypair::generate::<Sha512>(&mut rng));
}
#[bench]
fn underlying_scalar_mult_basepoint(b: &mut Bencher) {
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
let scalar: Scalar = Scalar([ 20, 130, 129, 196, 247, 182, 211, 102,
11, 168, 169, 131, 159, 69, 126, 35,
| return false;
}
ao = self.decompress();
| random_line_split |
ed25519.rs | :
// - Isis Agora Lovecruft <[email protected]>
//! A Rust implementation of ed25519 EdDSA key generation, signing, and
//! verification.
use core::fmt::Debug;
#[cfg(feature = "std")]
use rand::Rng;
use digest::BlockInput;
use digest::Digest;
use digest::Input;
use digest::FixedOutput;
use generic_array::typenum::U64;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::CompressedEdwardsY;
use curve25519_dalek::edwards::ExtendedPoint;
use curve25519_dalek::scalar::Scalar;
use subtle::slices_equal;
/// The length of an ed25519 EdDSA `Signature`, in bytes.
pub const SIGNATURE_LENGTH: usize = 64;
/// The length of an ed25519 EdDSA `SecretKey`, in bytes.
pub const SECRET_KEY_LENGTH: usize = 32;
/// The length of an ed25519 EdDSA `PublicKey`, in bytes.
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// An EdDSA signature.
///
/// # Note
///
/// These signatures, unlike the ed25519 signature reference implementation, are
/// "detached"—that is, they do **not** include a copy of the message which has
/// been signed.
#[derive(Copy)]
#[repr(C)]
pub struct Signature(pub [u8; SIGNATURE_LENGTH]);
impl Clone for Signature {
fn clone(&self) -> Self { *self }
}
impl Debug for Signature {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Signature([{:?}])", &self.0[..])
}
}
impl Eq for Signature {}
impl PartialEq for Signature {
fn eq(&self, other: &Signature) -> bool {
let mut equal: u8 = 0;
for i in 0..64 {
equal |= self.0[i] ^ other.0[i];
}
if equal == 0 {
return true;
} else {
return false;
}
}
}
impl Signature {
/// View this `Signature` as a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] {
self.0
}
/// View this `Signature` as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SIGNATURE_LENGTH] {
&self.0
}
/// Construct a `Signature` from a slice of bytes.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> Signature {
Signature(*array_ref!(bytes, 0, SIGNATURE_LENGTH))
}
}
/// An EdDSA secret key.
#[repr(C)]
pub struct SecretKey(pub [u8; SECRET_KEY_LENGTH]);
impl Debug for SecretKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "SecretKey: {:?}", &self.0[..])
}
}
impl SecretKey {
/// Convert this secret key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SECRET_KEY_LENGTH] {
self.0
}
/// View this secret key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SECRET_KEY_LENGTH] {
&self.0
}
/// Construct a `SecretKey` from a slice of bytes.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::SECRET_KEY_LENGTH;
///
/// let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = [
/// 157, 097, 177, 157, 239, 253, 090, 096,
/// 186, 132, 074, 244, 146, 236, 044, 196,
/// 068, 073, 197, 105, 123, 050, 105, 025,
/// 112, 059, 172, 003, 028, 174, 127, 096, ];
///
/// let secret_key: SecretKey = SecretKey::from_bytes(&secret_key_bytes[..]);
/// # }
/// ```
///
/// # Returns
///
/// An EdDSA `SecretKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
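// --- Added sketch (illustrative helper, not in the original source): a
// `SecretKey` round-trips losslessly through its 32-byte array form. ---
#[allow(dead_code)]
fn secret_key_roundtrip_sketch(sk: &SecretKey) {
let bytes: [u8; SECRET_KEY_LENGTH] = sk.to_bytes();
let restored: SecretKey = SecretKey::from_bytes(&bytes[..]);
assert_eq!(restored.to_bytes(), bytes);
}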
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
| /// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 {
return false;
}
ao = self.decompress();
if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing the corresponding secret key.
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Keypair`.
pub fn from_bytes<'a>(public: &'a [u8; 32], secret: &'a [u8; 32]) -> Keypair {
Keypair{ public: PublicKey::from_bytes(public),
secret: SecretKey::from_bytes(secret), }
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let mut cspring: OsRng = OsRng::new().unwrap();
/// let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
///
/// # }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
#[cfg(feature = "std")]
pub fn generate<D>(csprng: &mut Rng) -> Keypair
where D: Digest<OutputSize = U64> + Default {
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = PublicKey::from_secret::<D>(&sk);
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign<D>(&self, message: &[u8]) -> Signature
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut signature_bytes: [u8; 64] = [0u8; SIGNATURE_LENGTH];
let mut expanded_key_secret: Scalar;
let mesg_digest: Scalar;
let hram_digest: Scalar;
let r: ExtendedPoint;
let s: Scalar;
let t: CompressedEdwardsY;
let secret_key: &[u8; 32] = self.secret.as_bytes();
let public_key: &[u8; 32] = self.public.as_bytes();
h.input(secret_key);
hash.copy_from_slice(h.fixed_result().as_slice());
expanded_key_secret = Scalar(*array_ref!(&hash, 0, 32));
expanded_key_secret[0] &= 248;
expanded_key_secret[31] &= 63;
expanded_key_secret[31] |= 64;
h = D::default();
h.input(&hash[32..]);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
mesg_digest = Scalar::reduce(&hash);
r = &mesg_digest * &constants::ED25519_BASEPOINT_TABLE;
h = D::default();
h.input(&r.compress().to_bytes()[..]);
h.input(public_key);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
hram_digest = Scalar::reduce(&hash);
s = Scalar::multiply_add(&hram_digest, &expanded_key_secret, &mesg_digest);
t = r.compress();
signature_bytes[..32].copy_from_slice(&t.0);
signature_bytes[32..64].copy_from_slice(&s.0);
Signature(*array_ref!(&signature_bytes, 0, 64))
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: FixedOutput<OutputSize = U64> + BlockInput + Default + Input {
self.public.verify::<D>(message, signature)
}
}
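// --- Added sketch illustrating the "detached" note above: the message must be
// presented together with the signature at verification time. Illustrative
// helper, not part of the original source. ---
#[allow(dead_code)]
fn detached_signature_sketch<D>(keypair: &Keypair, message: &[u8]) -> bool
where D: Digest<OutputSize = U64> + Default {
let signature: Signature = keypair.sign::<D>(message);
// The signature carries no copy of `message`; verifying against any other
// message fails.
keypair.public.verify::<D>(message, &signature)
}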
#[cfg(test)]
mod test {
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::string::String;
use std::vec::Vec;
use curve25519_dalek::edwards::ExtendedPoint;
use rand::OsRng;
use hex::FromHex;
use sha2::Sha512;
use super::*;
#[test]
fn unmarshal_marshal() { // TestUnmarshalMarshal
let mut cspring: OsRng;
let mut keypair: Keypair;
let mut x: Option<ExtendedPoint>;
let a: ExtendedPoint;
let public: PublicKey;
cspring = OsRng::new().unwrap();
// from_bytes() fails if vx²-u=0 and vx²+u=0
loop {
keypair = Keypair::generate::<Sha512>(&mut cspring);
x = keypair.public.decompress();
if x.is_some() {
a = x.unwrap();
break;
}
}
public = PublicKey(a.compress());
assert!(keypair.public.0 == public.0);
}
#[test]
fn sign_verify() { // TestSignVerify
let mut cspring: OsRng;
let keypair: Keypair;
let good_sig: Signature;
let bad_sig: Signature;
let good: &[u8] = "test message".as_bytes();
let bad: &[u8] = "wrong message".as_bytes();
cspring = OsRng::new().unwrap();
keypair = Keypair::generate::<Sha512>(&mut cspring);
good_sig = keypair.sign::<Sha512>(&good);
bad_sig = keypair.sign::<Sha512>(&bad);
assert!(keypair.verify::<Sha512>(&good, &good_sig) == true,
"Verification of a valid signature failed!");
assert!(keypair.verify::<Sha512>(&good, &bad_sig) == false,
"Verification of a signature on a different message passed!");
assert!(keypair.verify::<Sha512>(&bad, &good_sig) == false,
"Verification of a signature on a different message passed!");
}
// TESTVECTORS is taken from sign.input.gz in agl's ed25519 Golang
// package. It is a selection of test cases from
// http://ed25519.cr.yp.to/python/sign.input
#[cfg(test)]
#[cfg(not(release))]
#[test]
fn golden() { // TestGolden
let mut line: String;
let mut lineno: usize = 0;
let f = File::open("TESTVECTORS");
if f.is_err() {
println!("This test is only available when the code has been cloned \
from the git repository, since the TESTVECTORS file is large \
and is therefore not included within the distributed crate.");
panic!();
}
let file = BufReader::new(f.unwrap());
for l in file.lines() {
lineno += 1;
line = l.unwrap();
let parts: Vec<&str> = line.split(':').collect();
assert_eq!(parts.len(), 5, "wrong number of fields in line {}", lineno);
let sec_bytes: Vec<u8> = FromHex::from_hex(&parts[0]).unwrap();
let pub_bytes: Vec<u8> = FromHex::from_hex(&parts[1]).unwrap();
let message: Vec<u8> = FromHex::from_hex(&parts[2]).unwrap();
let sig_bytes: Vec<u8> = FromHex::from_hex(&parts[3]).unwrap();
// The signatures in the test vectors also include the message
// at the end, but we just want R and S.
let sig1: Signature = Signature::from_bytes(sig_bytes.as_ref());
let keypair: Keypair = Keypair::from_bytes(
array_ref!(*pub_bytes, 0, PUBLIC_KEY_LENGTH),
array_ref!(*sec_bytes, 0, SECRET_KEY_LENGTH));
let sig2: Signature = keypair.sign::<Sha512>(&message);
assert!(sig1 == sig2, "Signature bytes not equal on line {}", lineno);
assert!(keypair.verify::<Sha512>(&message, &sig2),
"Signature verification failed on line {}", lineno);
}
}
}
#[cfg(all(test, feature = "bench"))]
mod bench {
use test::Bencher;
use rand::OsRng;
use sha2::Sha512;
use super::*;
/// A fake RNG which simply returns zeroes.
struct ZeroRng;
impl ZeroRng {
pub fn new() -> ZeroRng {
ZeroRng
}
}
impl Rng for ZeroRng {
fn next_u32(&mut self) -> u32 { 0u32 }
fn fill_bytes(&mut self, bytes: &mut [u8]) {
for i in 0..bytes.len() {
bytes[i] = 0;
}
}
}
#[bench]
fn sign(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
b.iter(|| keypair.sign::<Sha512>(msg));
}
#[bench]
fn verify(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
let sig: Signature = keypair.sign::<Sha512>(msg);
b.iter(|| keypair.verify::<Sha512>(msg, &sig));
}
#[bench]
fn key_generation(b: &mut Bencher) {
let mut rng: ZeroRng = ZeroRng::new();
b.iter(|| Keypair::generate::<Sha512>(&mut rng));
}
#[bench]
fn underlying_scalar_mult_basepoint(b: &mut Bencher) {
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
let scalar: Scalar = Scalar([ 20, 130, 129, 196, 247, 182, 211, 102,
11, 168, 169, 131, 159, 69, 126, 35,
| self.0.to_bytes()
}
| identifier_body |
ed25519.rs | :
// - Isis Agora Lovecruft <[email protected]>
//! A Rust implementation of ed25519 EdDSA key generation, signing, and
//! verification.
use core::fmt::Debug;
#[cfg(feature = "std")]
use rand::Rng;
use digest::BlockInput;
use digest::Digest;
use digest::Input;
use digest::FixedOutput;
use generic_array::typenum::U64;
use curve25519_dalek::constants;
use curve25519_dalek::edwards::CompressedEdwardsY;
use curve25519_dalek::edwards::ExtendedPoint;
use curve25519_dalek::scalar::Scalar;
use subtle::slices_equal;
/// The length of an ed25519 EdDSA `Signature`, in bytes.
pub const SIGNATURE_LENGTH: usize = 64;
/// The length of an ed25519 EdDSA `SecretKey`, in bytes.
pub const SECRET_KEY_LENGTH: usize = 32;
/// The length of an ed25519 EdDSA `PublicKey`, in bytes.
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// An EdDSA signature.
///
/// # Note
///
/// These signatures, unlike the ed25519 signature reference implementation, are
/// "detached"—that is, they do **not** include a copy of the message which has
/// been signed.
#[derive(Copy)]
#[repr(C)]
pub struct Signature(pub [u8; SIGNATURE_LENGTH]);
impl Clone for Signature {
fn clone(&self) -> Self { *self }
}
impl Debug for Signature {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Signature([{:?}])", &self.0[..])
}
}
impl Eq for Signature {}
impl PartialEq for Signature {
fn eq(&self, other: &Signature) -> bool {
let mut equal: u8 = 0;
for i in 0..64 {
equal |= self.0[i] ^ other.0[i];
}
if equal == 0 {
| lse {
return false;
}
}
}
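// --- Added note and sketch: the `eq` above avoids early returns, XOR-ing
// every byte pair and OR-ing the results, so the comparison takes the same
// time regardless of where the signatures first differ. The same pattern on
// plain 64-byte arrays (illustrative only): ---
#[allow(dead_code)]
fn constant_time_eq_sketch(a: &[u8; 64], b: &[u8; 64]) -> bool {
let mut acc: u8 = 0;
for i in 0..64 {
acc |= a[i] ^ b[i]; // accumulate differences instead of branching
}
acc == 0
}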
impl Signature {
/// View this `Signature` as a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] {
self.0
}
/// View this `Signature` as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SIGNATURE_LENGTH] {
&self.0
}
/// Construct a `Signature` from a slice of bytes.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> Signature {
Signature(*array_ref!(bytes, 0, SIGNATURE_LENGTH))
}
}
/// An EdDSA secret key.
#[repr(C)]
pub struct SecretKey(pub [u8; SECRET_KEY_LENGTH]);
impl Debug for SecretKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "SecretKey: {:?}", &self.0[..])
}
}
impl SecretKey {
/// Convert this secret key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SECRET_KEY_LENGTH] {
self.0
}
/// View this secret key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SECRET_KEY_LENGTH] {
&self.0
}
/// Construct a `SecretKey` from a slice of bytes.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::SECRET_KEY_LENGTH;
///
/// let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = [
/// 157, 097, 177, 157, 239, 253, 090, 096,
/// 186, 132, 074, 244, 146, 236, 044, 196,
/// 068, 073, 197, 105, 123, 050, 105, 025,
/// 112, 059, 172, 003, 028, 174, 127, 096, ];
///
/// let secret_key: SecretKey = SecretKey::from_bytes(&secret_key_bytes[..]);
/// # }
/// ```
///
/// # Returns
///
/// An EdDSA `SecretKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 {
return false;
}
ao = self.decompress();
if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing the corresponding secret key.
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Keypair`.
pub fn from_bytes<'a>(public: &'a [u8; 32], secret: &'a [u8; 32]) -> Keypair {
Keypair{ public: PublicKey::from_bytes(public),
secret: SecretKey::from_bytes(secret), }
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let mut cspring: OsRng = OsRng::new().unwrap();
/// let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
///
/// # }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
#[cfg(feature = "std")]
pub fn generate<D>(csprng: &mut Rng) -> Keypair
where D: Digest<OutputSize = U64> + Default {
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = PublicKey::from_secret::<D>(&sk);
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign<D>(&self, message: &[u8]) -> Signature
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut signature_bytes: [u8; 64] = [0u8; SIGNATURE_LENGTH];
let mut expanded_key_secret: Scalar;
let mesg_digest: Scalar;
let hram_digest: Scalar;
let r: ExtendedPoint;
let s: Scalar;
let t: CompressedEdwardsY;
let secret_key: &[u8; 32] = self.secret.as_bytes();
let public_key: &[u8; 32] = self.public.as_bytes();
h.input(secret_key);
hash.copy_from_slice(h.fixed_result().as_slice());
expanded_key_secret = Scalar(*array_ref!(&hash, 0, 32));
expanded_key_secret[0] &= 248;
expanded_key_secret[31] &= 63;
expanded_key_secret[31] |= 64;
h = D::default();
h.input(&hash[32..]);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
mesg_digest = Scalar::reduce(&hash);
r = &mesg_digest * &constants::ED25519_BASEPOINT_TABLE;
h = D::default();
h.input(&r.compress().to_bytes()[..]);
h.input(public_key);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
hram_digest = Scalar::reduce(&hash);
s = Scalar::multiply_add(&hram_digest, &expanded_key_secret, &mesg_digest);
t = r.compress();
signature_bytes[..32].copy_from_slice(&t.0);
signature_bytes[32..64].copy_from_slice(&s.0);
Signature(*array_ref!(&signature_bytes, 0, 64))
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: FixedOutput<OutputSize = U64> + BlockInput + Default + Input {
self.public.verify::<D>(message, signature)
}
}
#[cfg(test)]
mod test {
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::string::String;
use std::vec::Vec;
use curve25519_dalek::edwards::ExtendedPoint;
use rand::OsRng;
use hex::FromHex;
use sha2::Sha512;
use super::*;
#[test]
fn unmarshal_marshal() { // TestUnmarshalMarshal
let mut cspring: OsRng;
let mut keypair: Keypair;
let mut x: Option<ExtendedPoint>;
let a: ExtendedPoint;
let public: PublicKey;
cspring = OsRng::new().unwrap();
// from_bytes() fails if vx²-u=0 and vx²+u=0
loop {
keypair = Keypair::generate::<Sha512>(&mut cspring);
x = keypair.public.decompress();
if x.is_some() {
a = x.unwrap();
break;
}
}
public = PublicKey(a.compress());
assert!(keypair.public.0 == public.0);
}
#[test]
fn sign_verify() { // TestSignVerify
let mut cspring: OsRng;
let keypair: Keypair;
let good_sig: Signature;
let bad_sig: Signature;
let good: &[u8] = "test message".as_bytes();
let bad: &[u8] = "wrong message".as_bytes();
cspring = OsRng::new().unwrap();
keypair = Keypair::generate::<Sha512>(&mut cspring);
good_sig = keypair.sign::<Sha512>(&good);
bad_sig = keypair.sign::<Sha512>(&bad);
assert!(keypair.verify::<Sha512>(&good, &good_sig) == true,
"Verification of a valid signature failed!");
assert!(keypair.verify::<Sha512>(&good, &bad_sig) == false,
"Verification of a signature on a different message passed!");
assert!(keypair.verify::<Sha512>(&bad, &good_sig) == false,
"Verification of a signature on a different message passed!");
}
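// An extra round-trip check in the style of the tests above (added sketch,
// not part of the upstream test suite).
#[test]
fn signature_bytes_roundtrip() {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let sig: Signature = keypair.sign::<Sha512>(b"roundtrip");
let bytes: [u8; SIGNATURE_LENGTH] = sig.to_bytes();
let restored: Signature = Signature::from_bytes(&bytes);
assert!(sig == restored, "Signature did not survive byte round-trip!");
}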
// TESTVECTORS is taken from sign.input.gz in agl's ed25519 Golang
// package. It is a selection of test cases from
// http://ed25519.cr.yp.to/python/sign.input
#[cfg(test)]
#[cfg(not(release))]
#[test]
fn golden() { // TestGolden
let mut line: String;
let mut lineno: usize = 0;
let f = File::open("TESTVECTORS");
if f.is_err() {
println!("This test is only available when the code has been cloned \
from the git repository, since the TESTVECTORS file is large \
and is therefore not included within the distributed crate.");
panic!();
}
let file = BufReader::new(f.unwrap());
for l in file.lines() {
lineno += 1;
line = l.unwrap();
let parts: Vec<&str> = line.split(':').collect();
assert_eq!(parts.len(), 5, "wrong number of fields in line {}", lineno);
let sec_bytes: Vec<u8> = FromHex::from_hex(&parts[0]).unwrap();
let pub_bytes: Vec<u8> = FromHex::from_hex(&parts[1]).unwrap();
let message: Vec<u8> = FromHex::from_hex(&parts[2]).unwrap();
let sig_bytes: Vec<u8> = FromHex::from_hex(&parts[3]).unwrap();
// The signatures in the test vectors also include the message
// at the end, but we just want R and S.
let sig1: Signature = Signature::from_bytes(sig_bytes.as_ref());
let keypair: Keypair = Keypair::from_bytes(
array_ref!(*pub_bytes, 0, PUBLIC_KEY_LENGTH),
array_ref!(*sec_bytes, 0, SECRET_KEY_LENGTH));
let sig2: Signature = keypair.sign::<Sha512>(&message);
assert!(sig1 == sig2, "Signature bytes not equal on line {}", lineno);
assert!(keypair.verify::<Sha512>(&message, &sig2),
"Signature verification failed on line {}", lineno);
}
}
}
#[cfg(all(test, feature = "bench"))]
mod bench {
use test::Bencher;
use rand::OsRng;
use sha2::Sha512;
use super::*;
/// A fake RNG which simply returns zeroes.
struct ZeroRng;
impl ZeroRng {
pub fn new() -> ZeroRng {
ZeroRng
}
}
impl Rng for ZeroRng {
fn next_u32(&mut self) -> u32 { 0u32 }
fn fill_bytes(&mut self, bytes: &mut [u8]) {
for i in 0..bytes.len() {
bytes[i] = 0;
}
}
}
#[bench]
fn sign(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
b.iter(|| keypair.sign::<Sha512>(msg));
}
#[bench]
fn verify(b: &mut Bencher) {
let mut cspring: OsRng = OsRng::new().unwrap();
let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
let msg: &[u8] = b"";
let sig: Signature = keypair.sign::<Sha512>(msg);
b.iter(|| keypair.verify::<Sha512>(msg, &sig));
}
#[bench]
fn key_generation(b: &mut Bencher) {
let mut rng: ZeroRng = ZeroRng::new();
b.iter(|| Keypair::generate::<Sha512>(&mut rng));
}
#[bench]
fn underlying_scalar_mult_basepoint(b: &mut Bencher) {
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
let scalar: Scalar = Scalar([ 20, 130, 129, 196, 247, 182, 211, 102,
11, 168, 169, 131, 159, 69, 126, 35,
| return true;
} e | conditional_block |
nsis.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#[cfg(target_os = "windows")]
use crate::bundle::windows::util::try_sign;
use crate::{
bundle::{
common::CommandExt,
windows::util::{
download, download_and_verify, extract_zip, HashAlgorithm, NSIS_OUTPUT_FOLDER_NAME,
NSIS_UPDATER_OUTPUT_FOLDER_NAME, WEBVIEW2_BOOTSTRAPPER_URL, WEBVIEW2_X64_INSTALLER_GUID,
WEBVIEW2_X86_INSTALLER_GUID,
},
},
Settings,
};
use tauri_utils::display_path;
use anyhow::Context;
use handlebars::{to_json, Handlebars};
use log::{info, warn};
use tauri_utils::{
config::{NSISInstallerMode, WebviewInstallMode},
resources::resource_relpath,
};
use std::{
collections::{BTreeMap, HashMap},
fs::{copy, create_dir_all, remove_dir_all, rename, write},
path::{Path, PathBuf},
process::Command,
};
// URLS for the NSIS toolchain.
#[cfg(target_os = "windows")]
const NSIS_URL: &str =
"https://github.com/tauri-apps/binary-releases/releases/download/nsis-3/nsis-3.zip";
#[cfg(target_os = "windows")]
const NSIS_SHA1: &str = "057e83c7d82462ec394af76c87d06733605543d4";
const NSIS_APPLICATIONID_URL: &str = "https://github.com/tauri-apps/binary-releases/releases/download/nsis-plugins-v0/NSIS-ApplicationID.zip";
const NSIS_TAURI_UTILS: &str =
"https://github.com/tauri-apps/nsis-tauri-utils/releases/download/nsis_tauri_utils-v0.1.1/nsis_tauri_utils.dll";
const NSIS_TAURI_UTILS_SHA1: &str = "A21C67CF5AB6D4274AFFF0D68CFCE680D213DDC7";
#[cfg(target_os = "windows")]
const NSIS_REQUIRED_FILES: &[&str] = &[
"makensis.exe",
"Bin/makensis.exe",
"Stubs/lzma-x86-unicode",
"Stubs/lzma_solid-x86-unicode",
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
"Include/MUI2.nsh",
"Include/FileFunc.nsh",
"Include/x64.nsh",
"Include/nsDialogs.nsh",
"Include/WinMessages.nsh",
];
#[cfg(not(target_os = "windows"))]
const NSIS_REQUIRED_FILES: &[&str] = &[
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
];
/// Runs all of the commands to build the NSIS installer.
/// Returns a vector of PathBuf that shows where the NSIS installer was created.
pub fn | (settings: &Settings, updater: bool) -> crate::Result<Vec<PathBuf>> {
let tauri_tools_path = dirs_next::cache_dir().unwrap().join("tauri");
let nsis_toolset_path = tauri_tools_path.join("NSIS");
if !nsis_toolset_path.exists() {
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
} else if NSIS_REQUIRED_FILES
.iter()
.any(|p| !nsis_toolset_path.join(p).exists())
{
warn!("NSIS directory is missing some files. Recreating it.");
std::fs::remove_dir_all(&nsis_toolset_path)?;
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
}
build_nsis_app_installer(settings, &nsis_toolset_path, &tauri_tools_path, updater)
}
// Gets NSIS and verifies the download via Sha1
fn get_and_extract_nsis(nsis_toolset_path: &Path, _tauri_tools_path: &Path) -> crate::Result<()> {
info!("Verifying NSIS package");
#[cfg(target_os = "windows")]
{
let data = download_and_verify(NSIS_URL, NSIS_SHA1, HashAlgorithm::Sha1)?;
info!("extracting NSIS");
extract_zip(&data, _tauri_tools_path)?;
rename(_tauri_tools_path.join("nsis-3.08"), nsis_toolset_path)?;
}
let nsis_plugins = nsis_toolset_path.join("Plugins");
let data = download(NSIS_APPLICATIONID_URL)?;
info!("extracting NSIS ApplicationID plugin");
extract_zip(&data, &nsis_plugins)?;
create_dir_all(nsis_plugins.join("x86-unicode"))?;
copy(
nsis_plugins
.join("ReleaseUnicode")
.join("ApplicationID.dll"),
nsis_plugins.join("x86-unicode").join("ApplicationID.dll"),
)?;
let data = download_and_verify(NSIS_TAURI_UTILS, NSIS_TAURI_UTILS_SHA1, HashAlgorithm::Sha1)?;
write(
nsis_plugins
.join("x86-unicode")
.join("nsis_tauri_utils.dll"),
data,
)?;
Ok(())
}
fn add_build_number_if_needed(version_str: &str) -> anyhow::Result<String> {
let version = semver::Version::parse(version_str).context("invalid app version")?;
if !version.build.is_empty() {
let build = version.build.parse::<u64>();
if build.is_ok() {
return Ok(format!(
"{}.{}.{}.{}",
version.major, version.minor, version.patch, version.build
));
} else {
anyhow::bail!("optional build metadata in app version must be numeric-only");
}
}
Ok(format!(
"{}.{}.{}.0",
version.major, version.minor, version.patch,
))
}
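// Behavior sketch for the helper above (hypothetical test module, mirroring
// the branches in the function): missing build metadata becomes ".0", numeric
// build metadata is appended, and non-numeric build metadata is rejected.
#[cfg(test)]
mod build_number_tests {
use super::add_build_number_if_needed;
#[test]
fn appends_build_number() {
assert_eq!(add_build_number_if_needed("1.2.3").unwrap(), "1.2.3.0");
assert_eq!(add_build_number_if_needed("1.2.3+4").unwrap(), "1.2.3.4");
assert!(add_build_number_if_needed("1.2.3+abc").is_err());
}
}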
fn build_nsis_app_installer(
settings: &Settings,
_nsis_toolset_path: &Path,
tauri_tools_path: &Path,
updater: bool,
) -> crate::Result<Vec<PathBuf>> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
"aarch64" => "arm64",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
info!("Target: {}", arch);
#[cfg(target_os = "windows")]
{
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary);
try_sign(&app_exe_source, settings)?;
}
#[cfg(not(target_os = "windows"))]
info!("Code signing is currently only supported on Windows hosts, skipping...");
let output_path = settings.project_out_directory().join("nsis").join(arch);
if output_path.exists() {
remove_dir_all(&output_path)?;
}
create_dir_all(&output_path)?;
let mut data = BTreeMap::new();
let bundle_id = settings.bundle_identifier();
let manufacturer = settings
.publisher()
.unwrap_or_else(|| bundle_id.split('.').nth(1).unwrap_or(bundle_id));
#[cfg(not(target_os = "windows"))]
{
let mut dir = dirs_next::cache_dir().unwrap();
dir.extend(["tauri", "NSIS", "Plugins", "x86-unicode"]);
data.insert("additional_plugins_path", to_json(dir));
}
data.insert("arch", to_json(arch));
data.insert("bundle_id", to_json(bundle_id));
data.insert("manufacturer", to_json(manufacturer));
data.insert("product_name", to_json(settings.product_name()));
data.insert("short_description", to_json(settings.short_description()));
data.insert("copyright", to_json(settings.copyright_string()));
let version = settings.version_string();
data.insert("version", to_json(version));
data.insert(
"version_with_build",
to_json(add_build_number_if_needed(version)?),
);
data.insert(
"allow_downgrades",
to_json(settings.windows().allow_downgrades),
);
let mut install_mode = NSISInstallerMode::CurrentUser;
let mut languages = vec!["English".into()];
let mut custom_template_path = None;
let mut custom_language_files = None;
if let Some(nsis) = &settings.windows().nsis {
custom_template_path = nsis.template.clone();
custom_language_files = nsis.custom_language_files.clone();
install_mode = nsis.install_mode;
if let Some(langs) = &nsis.languages {
languages.clear();
languages.extend_from_slice(langs);
}
if let Some(license) = &nsis.license {
data.insert("license", to_json(dunce::canonicalize(license)?));
}
if let Some(installer_icon) = &nsis.installer_icon {
data.insert(
"installer_icon",
to_json(dunce::canonicalize(installer_icon)?),
);
}
if let Some(header_image) = &nsis.header_image {
data.insert("header_image", to_json(dunce::canonicalize(header_image)?));
}
if let Some(sidebar_image) = &nsis.sidebar_image {
data.insert(
"sidebar_image",
to_json(dunce::canonicalize(sidebar_image)?),
);
}
data.insert(
"display_language_selector",
to_json(nsis.display_language_selector && languages.len() > 1),
);
}
data.insert(
"install_mode",
to_json(match install_mode {
NSISInstallerMode::CurrentUser => "currentUser",
NSISInstallerMode::PerMachine => "perMachine",
NSISInstallerMode::Both => "both",
}),
);
let mut languages_data = Vec::new();
for lang in &languages {
if let Some(data) = get_lang_data(lang, custom_language_files.as_ref())? {
languages_data.push(data);
} else {
log::warn!("Custom tauri messages for {lang} are not translated.\nIf it is a valid language listed on <https://github.com/kichik/nsis/tree/9465c08046f00ccb6eda985abbdbf52c275c6c4d/Contrib/Language%20files>, please open a Tauri feature request\n or you can provide a custom language file for it in `tauri.conf.json > tauri > bundle > windows > nsis > custom_language_files`");
}
}
data.insert("languages", to_json(languages.clone()));
data.insert(
"language_files",
to_json(
languages_data
.iter()
.map(|d| d.0.clone())
.collect::<Vec<_>>(),
),
);
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
data.insert(
"main_binary_name",
to_json(main_binary.name().replace(".exe", "")),
);
data.insert(
"main_binary_path",
to_json(settings.binary_path(main_binary).with_extension("exe")),
);
let out_file = "nsis-output.exe";
data.insert("out_file", to_json(out_file));
let resources = generate_resource_data(settings)?;
data.insert("resources", to_json(resources));
let binaries = generate_binaries_data(settings)?;
data.insert("binaries", to_json(binaries));
if let Some(file_associations) = &settings.file_associations() {
data.insert("file_associations", to_json(file_associations));
}
let silent_webview2_install = if let WebviewInstallMode::DownloadBootstrapper { silent }
| WebviewInstallMode::EmbedBootstrapper { silent }
| WebviewInstallMode::OfflineInstaller { silent } =
settings.windows().webview_install_mode
{
silent
} else {
true
};
let webview2_install_mode = if updater {
WebviewInstallMode::DownloadBootstrapper {
silent: silent_webview2_install,
}
} else {
let mut webview_install_mode = settings.windows().webview_install_mode.clone();
if let Some(fixed_runtime_path) = settings.windows().webview_fixed_runtime_path.clone() {
webview_install_mode = WebviewInstallMode::FixedRuntime {
path: fixed_runtime_path,
};
} else if let Some(wix) = &settings.windows().wix {
if wix.skip_webview_install {
webview_install_mode = WebviewInstallMode::Skip;
}
}
webview_install_mode
};
let webview2_installer_args = to_json(if silent_webview2_install {
"/silent"
} else {
""
});
data.insert("webview2_installer_args", to_json(webview2_installer_args));
data.insert(
"install_webview2_mode",
to_json(match webview2_install_mode {
WebviewInstallMode::DownloadBootstrapper { silent: _ } => "downloadBootstrapper",
WebviewInstallMode::EmbedBootstrapper { silent: _ } => "embedBootstrapper",
WebviewInstallMode::OfflineInstaller { silent: _ } => "offlineInstaller",
_ => "",
}),
);
match webview2_install_mode {
WebviewInstallMode::EmbedBootstrapper { silent: _ } => {
let webview2_bootstrapper_path = tauri_tools_path.join("MicrosoftEdgeWebview2Setup.exe");
std::fs::write(
&webview2_bootstrapper_path,
download(WEBVIEW2_BOOTSTRAPPER_URL)?,
)?;
data.insert(
"webview2_bootstrapper_path",
to_json(webview2_bootstrapper_path),
);
}
WebviewInstallMode::OfflineInstaller { silent: _ } => {
let guid = if arch == "x64" {
WEBVIEW2_X64_INSTALLER_GUID
} else {
WEBVIEW2_X86_INSTALLER_GUID
};
let offline_installer_path = tauri_tools_path
.join("Webview2OfflineInstaller")
.join(guid)
.join(arch);
create_dir_all(&offline_installer_path)?;
let webview2_installer_path =
offline_installer_path.join("MicrosoftEdgeWebView2RuntimeInstaller.exe");
if !webview2_installer_path.exists() {
std::fs::write(
&webview2_installer_path,
download(
&format!("https://msedge.sf.dl.delivery.mp.microsoft.com/filestreamingservice/files/{}/MicrosoftEdgeWebView2RuntimeInstaller{}.exe",
guid,
arch.to_uppercase(),
),
)?,
)?;
}
data.insert("webview2_installer_path", to_json(webview2_installer_path));
}
_ => {}
}
let mut handlebars = Handlebars::new();
handlebars.register_helper("or", Box::new(handlebars_or));
handlebars.register_helper("association-description", Box::new(association_description));
handlebars.register_escape_fn(|s| {
let mut output = String::new();
for c in s.chars() {
match c {
'\"' => output.push_str("$\\\""),
'$' => output.push_str("$$"),
'`' => output.push_str("$\\`"),
'\n' => output.push_str("$\\n"),
'\t' => output.push_str("$\\t"),
'\r' => output.push_str("$\\r"),
_ => output.push(c),
}
}
output
});
if let Some(path) = custom_template_path {
handlebars
.register_template_string("installer.nsi", std::fs::read_to_string(path)?)
.map_err(|e| e.to_string())
.expect("Failed to setup custom handlebar template");
} else {
handlebars
.register_template_string("installer.nsi", include_str!("./templates/installer.nsi"))
.map_err(|e| e.to_string())
.expect("Failed to setup handlebar template");
}
write_ut16_le_with_bom(
&output_path.join("FileAssociation.nsh"),
include_str!("./templates/FileAssociation.nsh"),
)?;
let installer_nsi_path = output_path.join("installer.nsi");
write_ut16_le_with_bom(
&installer_nsi_path,
handlebars.render("installer.nsi", &data)?.as_str(),
)?;
for (lang, data) in languages_data.iter() {
if let Some(content) = data {
write_ut16_le_with_bom(output_path.join(lang).with_extension("nsh"), content)?;
}
}
let package_base_name = format!(
"{}_{}_{}-setup",
main_binary.name().replace(".exe", ""),
settings.version_string(),
arch,
);
let nsis_output_path = output_path.join(out_file);
let nsis_installer_path = settings.project_out_directory().to_path_buf().join(format!(
"bundle/{}/{}.exe",
if updater {
NSIS_UPDATER_OUTPUT_FOLDER_NAME
} else {
NSIS_OUTPUT_FOLDER_NAME
},
package_base_name
));
create_dir_all(nsis_installer_path.parent().unwrap())?;
info!(action = "Running"; "makensis.exe to produce {}", display_path(&nsis_installer_path));
#[cfg(target_os = "windows")]
let mut nsis_cmd = Command::new(_nsis_toolset_path.join("makensis.exe"));
#[cfg(not(target_os = "windows"))]
let mut nsis_cmd = Command::new("makensis");
nsis_cmd
.arg(match settings.log_level() {
log::Level::Error => "-V1",
log::Level::Warn => "-V2",
log::Level::Info => "-V3",
_ => "-V4",
})
.arg(installer_nsi_path)
.current_dir(output_path)
.piped()
.context("error running makensis.exe")?;
rename(nsis_output_path, &nsis_installer_path)?;
// Code signing is currently only supported on Windows hosts
#[cfg(target_os = "windows")]
try_sign(&nsis_installer_path, settings)?;
Ok(vec![nsis_installer_path])
}
fn handlebars_or(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let param1 = h.param(0).unwrap().render();
let param2 = h.param(1).unwrap();
out.write(&if param1.is_empty() {
param2.render()
} else {
param1
})?;
Ok(())
}
fn association_description(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let description = h.param(0).unwrap().render();
let ext = h.param(1).unwrap();
out.write(&if description.is_empty() {
format!("{} File", ext.render().to_uppercase())
} else {
description
})?;
Ok(())
}
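// Minimal sketch of how the two helpers above behave once registered. This test
// module is illustrative only (not part of the original bundler) and assumes
// `serde_json` is available (handlebars already depends on it): `or` falls back
// to its second parameter when the first renders empty, and
// `association-description` synthesizes "<EXT> File" when no description is given.
#[cfg(test)]
mod helper_tests {
    use super::{association_description, handlebars_or};
    use handlebars::Handlebars;

    #[test]
    fn helpers_render_fallbacks() {
        let mut hb = Handlebars::new();
        hb.register_helper("or", Box::new(handlebars_or));
        hb.register_helper("association-description", Box::new(association_description));

        let data = serde_json::json!({ "empty": "", "name": "MyApp", "ext": "png" });
        // The first parameter renders empty, so `or` falls back to the second.
        assert_eq!(hb.render_template("{{or empty name}}", &data).unwrap(), "MyApp");
        // No description given, so the extension is upper-cased into a default.
        assert_eq!(
            hb.render_template("{{association-description empty ext}}", &data).unwrap(),
            "PNG File"
        );
    }
}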
/// BTreeMap<OriginalPath, (ParentOfTargetPath, TargetPath)>
type ResourcesMap = BTreeMap<PathBuf, (String, PathBuf)>;
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourcesMap> {
let mut resources = ResourcesMap::new();
let cwd = std::env::current_dir()?;
let mut added_resources = Vec::new();
for src in settings.resource_files() {
let src = src?;
let resource_path = dunce::canonicalize(cwd.join(&src))?;
// In some glob resource paths like `assets/**/*` a file might appear twice
// because the `tauri_utils::resources::ResourcePaths` iterator also reads a directory
// when it finds one. So we must check it before processing the file.
if added_resources.contains(&resource_path) {
continue;
}
added_resources.push(resource_path.clone());
let target_path = resource_relpath(&src);
resources.insert(
resource_path,
(
target_path
.parent()
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_default(),
target_path,
),
);
}
Ok(resources)
}
/// BTreeMap<OriginalPath, TargetFileName>
type BinariesMap = BTreeMap<PathBuf, String>;
fn generate_binaries_data(settings: &Settings) -> crate::Result<BinariesMap> {
let mut binaries = BinariesMap::new();
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let binary_path = dunce::canonicalize(cwd.join(&src))?;
let dest_filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{}", settings.target()), "");
binaries.insert(binary_path, dest_filename);
}
for bin in settings.binaries() {
if !bin.main() {
let bin_path = settings.binary_path(bin);
binaries.insert(
bin_path.clone(),
bin_path
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.to_string(),
);
}
}
Ok(binaries)
}
fn get_lang_data(
lang: &str,
custom_lang_files: Option<&HashMap<String, PathBuf>>,
) -> crate::Result<Option<(PathBuf, Option<&'static str>)>> {
if let Some(path) = custom_lang_files.and_then(|h| h.get(lang)) {
return Ok(Some((dunce::canonicalize(path)?, None)));
}
let lang_path = PathBuf::from(format!("{lang}.nsh"));
let lang_content = match lang.to_lowercase().as_str() {
"arabic" => Some(include_str!("./templates/nsis-languages/Arabic.nsh")),
"bulgarian" => Some(include_str!("./templates/nsis-languages/Bulgarian.nsh")),
"dutch" => Some(include_str!("./templates/nsis-languages/Dutch.nsh")),
"english" => Some(include_str!("./templates/nsis-languages/English.nsh")),
"japanese" => Some(include_str!("./templates/nsis-languages/Japanese.nsh")),
"korean" => Some(include_str!("./templates/nsis-languages/Korean.nsh")),
"portuguesebr" => Some(include_str!("./templates/nsis-languages/PortugueseBR.nsh")),
"tradchinese" => Some(include_str!("./templates/nsis-languages/TradChinese.nsh")),
"simpchinese" => Some(include_str!("./templates/nsis-languages/SimpChinese.nsh")),
"french" => Some(include_str!("./templates/nsis-languages/French.nsh")),
"spanish" => Some(include_str!("./templates/nsis-languages/Spanish.nsh")),
"spanishinternational" => Some(include_str!(
"./templates/nsis-languages/SpanishInternational.nsh"
)),
"persian" => Some(include_str!("./templates/nsis-languages/Persian.nsh")),
"turkish" => Some(include_str!("./templates/nsis-languages/Turkish.nsh")),
"swedish" => Some(include_str!("./templates/nsis-languages/Swedish.nsh")),
_ => return Ok(None),
};
Ok(Some((lang_path, lang_content)))
}
fn write_ut16_le_with_bom<P: AsRef<Path>>(path: P, content: &str) -> crate::Result<()> {
use std::fs::File;
use std::io::{BufWriter, Write};
let file = File::create(path)?;
let mut output = BufWriter::new(file);
output.write_all(&[0xFF, 0xFE])?; // the BOM part
for utf16 in content.encode_utf16() {
output.write_all(&utf16.to_le_bytes())?;
}
Ok(())
}
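// Quick illustrative check (not in the original source; assumes the crate's
// error type implements `Debug` so `unwrap` works) that the writer above emits
// the little-endian BOM `FF FE` followed by each UTF-16 code unit in LE order.
#[cfg(test)]
mod bom_tests {
    use super::write_ut16_le_with_bom;

    #[test]
    fn writes_bom_then_utf16_le() {
        let path = std::env::temp_dir().join("nsis_bom_test.nsh");
        write_ut16_le_with_bom(&path, "A").unwrap();
        // 'A' is U+0041, so the file should be exactly: BOM, then 0x41 0x00.
        assert_eq!(std::fs::read(&path).unwrap(), vec![0xFF, 0xFE, 0x41, 0x00]);
    }
}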
| bundle_project | identifier_name |
nsis.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#[cfg(target_os = "windows")]
use crate::bundle::windows::util::try_sign;
use crate::{
bundle::{
common::CommandExt,
windows::util::{
download, download_and_verify, extract_zip, HashAlgorithm, NSIS_OUTPUT_FOLDER_NAME,
NSIS_UPDATER_OUTPUT_FOLDER_NAME, WEBVIEW2_BOOTSTRAPPER_URL, WEBVIEW2_X64_INSTALLER_GUID,
WEBVIEW2_X86_INSTALLER_GUID,
},
},
Settings,
};
use tauri_utils::display_path;
use anyhow::Context;
use handlebars::{to_json, Handlebars};
use log::{info, warn};
use tauri_utils::{
config::{NSISInstallerMode, WebviewInstallMode},
resources::resource_relpath,
};
use std::{
collections::{BTreeMap, HashMap},
fs::{copy, create_dir_all, remove_dir_all, rename, write},
path::{Path, PathBuf},
process::Command,
};
// URLs for the NSIS toolchain.
#[cfg(target_os = "windows")]
const NSIS_URL: &str =
"https://github.com/tauri-apps/binary-releases/releases/download/nsis-3/nsis-3.zip";
#[cfg(target_os = "windows")]
const NSIS_SHA1: &str = "057e83c7d82462ec394af76c87d06733605543d4";
const NSIS_APPLICATIONID_URL: &str = "https://github.com/tauri-apps/binary-releases/releases/download/nsis-plugins-v0/NSIS-ApplicationID.zip";
const NSIS_TAURI_UTILS: &str =
"https://github.com/tauri-apps/nsis-tauri-utils/releases/download/nsis_tauri_utils-v0.1.1/nsis_tauri_utils.dll";
const NSIS_TAURI_UTILS_SHA1: &str = "A21C67CF5AB6D4274AFFF0D68CFCE680D213DDC7";
#[cfg(target_os = "windows")]
const NSIS_REQUIRED_FILES: &[&str] = &[
"makensis.exe",
"Bin/makensis.exe",
"Stubs/lzma-x86-unicode",
"Stubs/lzma_solid-x86-unicode",
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
"Include/MUI2.nsh",
"Include/FileFunc.nsh",
"Include/x64.nsh",
"Include/nsDialogs.nsh",
"Include/WinMessages.nsh",
];
#[cfg(not(target_os = "windows"))]
const NSIS_REQUIRED_FILES: &[&str] = &[
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
];
/// Runs all of the commands to build the NSIS installer.
/// Returns a vector of PathBuf that shows where the NSIS installer was created.
pub fn bundle_project(settings: &Settings, updater: bool) -> crate::Result<Vec<PathBuf>> {
let tauri_tools_path = dirs_next::cache_dir().unwrap().join("tauri");
let nsis_toolset_path = tauri_tools_path.join("NSIS");
if !nsis_toolset_path.exists() {
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
} else if NSIS_REQUIRED_FILES
.iter()
.any(|p| !nsis_toolset_path.join(p).exists())
{
warn!("NSIS directory is missing some files. Recreating it.");
std::fs::remove_dir_all(&nsis_toolset_path)?;
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
}
build_nsis_app_installer(settings, &nsis_toolset_path, &tauri_tools_path, updater)
}
// Gets NSIS and verifies the download via Sha1
fn get_and_extract_nsis(nsis_toolset_path: &Path, _tauri_tools_path: &Path) -> crate::Result<()> {
info!("Verifying NSIS package");
#[cfg(target_os = "windows")]
{
let data = download_and_verify(NSIS_URL, NSIS_SHA1, HashAlgorithm::Sha1)?; | rename(_tauri_tools_path.join("nsis-3.08"), nsis_toolset_path)?;
}
let nsis_plugins = nsis_toolset_path.join("Plugins");
let data = download(NSIS_APPLICATIONID_URL)?;
info!("extracting NSIS ApplicationID plugin");
extract_zip(&data, &nsis_plugins)?;
create_dir_all(nsis_plugins.join("x86-unicode"))?;
copy(
nsis_plugins
.join("ReleaseUnicode")
.join("ApplicationID.dll"),
nsis_plugins.join("x86-unicode").join("ApplicationID.dll"),
)?;
let data = download_and_verify(NSIS_TAURI_UTILS, NSIS_TAURI_UTILS_SHA1, HashAlgorithm::Sha1)?;
write(
nsis_plugins
.join("x86-unicode")
.join("nsis_tauri_utils.dll"),
data,
)?;
Ok(())
}
fn add_build_number_if_needed(version_str: &str) -> anyhow::Result<String> {
let version = semver::Version::parse(version_str).context("invalid app version")?;
if !version.build.is_empty() {
let build = version.build.parse::<u64>();
if build.is_ok() {
return Ok(format!(
"{}.{}.{}.{}",
version.major, version.minor, version.patch, version.build
));
} else {
anyhow::bail!("optional build metadata in app version must be numeric-only");
}
}
Ok(format!(
"{}.{}.{}.0",
version.major, version.minor, version.patch,
))
}
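// Illustrative test (not part of the original bundler source) pinning down the
// mapping implemented above: plain semver gains a trailing `.0`, numeric build
// metadata becomes the fourth component, and non-numeric metadata is rejected.
#[cfg(test)]
mod version_tests {
    use super::add_build_number_if_needed;

    #[test]
    fn maps_semver_to_four_part_windows_version() {
        assert_eq!(add_build_number_if_needed("1.2.3").unwrap(), "1.2.3.0");
        assert_eq!(add_build_number_if_needed("1.2.3+42").unwrap(), "1.2.3.42");
        assert!(add_build_number_if_needed("1.2.3+nightly").is_err());
    }
}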
fn build_nsis_app_installer(
settings: &Settings,
_nsis_toolset_path: &Path,
tauri_tools_path: &Path,
updater: bool,
) -> crate::Result<Vec<PathBuf>> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
"aarch64" => "arm64",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
info!("Target: {}", arch);
#[cfg(target_os = "windows")]
{
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary);
try_sign(&app_exe_source, settings)?;
}
#[cfg(not(target_os = "windows"))]
info!("Code signing is currently only supported on Windows hosts, skipping...");
let output_path = settings.project_out_directory().join("nsis").join(arch);
if output_path.exists() {
remove_dir_all(&output_path)?;
}
create_dir_all(&output_path)?;
let mut data = BTreeMap::new();
let bundle_id = settings.bundle_identifier();
let manufacturer = settings
.publisher()
.unwrap_or_else(|| bundle_id.split('.').nth(1).unwrap_or(bundle_id));
#[cfg(not(target_os = "windows"))]
{
let mut dir = dirs_next::cache_dir().unwrap();
dir.extend(["tauri", "NSIS", "Plugins", "x86-unicode"]);
data.insert("additional_plugins_path", to_json(dir));
}
data.insert("arch", to_json(arch));
data.insert("bundle_id", to_json(bundle_id));
data.insert("manufacturer", to_json(manufacturer));
data.insert("product_name", to_json(settings.product_name()));
data.insert("short_description", to_json(settings.short_description()));
data.insert("copyright", to_json(settings.copyright_string()));
let version = settings.version_string();
data.insert("version", to_json(version));
data.insert(
"version_with_build",
to_json(add_build_number_if_needed(version)?),
);
data.insert(
"allow_downgrades",
to_json(settings.windows().allow_downgrades),
);
let mut install_mode = NSISInstallerMode::CurrentUser;
let mut languages = vec!["English".into()];
let mut custom_template_path = None;
let mut custom_language_files = None;
if let Some(nsis) = &settings.windows().nsis {
custom_template_path = nsis.template.clone();
custom_language_files = nsis.custom_language_files.clone();
install_mode = nsis.install_mode;
if let Some(langs) = &nsis.languages {
languages.clear();
languages.extend_from_slice(langs);
}
if let Some(license) = &nsis.license {
data.insert("license", to_json(dunce::canonicalize(license)?));
}
if let Some(installer_icon) = &nsis.installer_icon {
data.insert(
"installer_icon",
to_json(dunce::canonicalize(installer_icon)?),
);
}
if let Some(header_image) = &nsis.header_image {
data.insert("header_image", to_json(dunce::canonicalize(header_image)?));
}
if let Some(sidebar_image) = &nsis.sidebar_image {
data.insert(
"sidebar_image",
to_json(dunce::canonicalize(sidebar_image)?),
);
}
data.insert(
"display_language_selector",
to_json(nsis.display_language_selector && languages.len() > 1),
);
}
data.insert(
"install_mode",
to_json(match install_mode {
NSISInstallerMode::CurrentUser => "currentUser",
NSISInstallerMode::PerMachine => "perMachine",
NSISInstallerMode::Both => "both",
}),
);
let mut languages_data = Vec::new();
for lang in &languages {
if let Some(data) = get_lang_data(lang, custom_language_files.as_ref())? {
languages_data.push(data);
} else {
log::warn!("Custom tauri messages for {lang} are not translated.\nIf it is a valid language listed on <https://github.com/kichik/nsis/tree/9465c08046f00ccb6eda985abbdbf52c275c6c4d/Contrib/Language%20files>, please open a Tauri feature request\n or you can provide a custom language file for it in `tauri.conf.json > tauri > bundle > windows > nsis > custom_language_files`");
}
}
data.insert("languages", to_json(languages.clone()));
data.insert(
"language_files",
to_json(
languages_data
.iter()
.map(|d| d.0.clone())
.collect::<Vec<_>>(),
),
);
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
data.insert(
"main_binary_name",
to_json(main_binary.name().replace(".exe", "")),
);
data.insert(
"main_binary_path",
to_json(settings.binary_path(main_binary).with_extension("exe")),
);
let out_file = "nsis-output.exe";
data.insert("out_file", to_json(out_file));
let resources = generate_resource_data(settings)?;
data.insert("resources", to_json(resources));
let binaries = generate_binaries_data(settings)?;
data.insert("binaries", to_json(binaries));
if let Some(file_associations) = &settings.file_associations() {
data.insert("file_associations", to_json(file_associations));
}
let silent_webview2_install = if let WebviewInstallMode::DownloadBootstrapper { silent }
| WebviewInstallMode::EmbedBootstrapper { silent }
| WebviewInstallMode::OfflineInstaller { silent } =
settings.windows().webview_install_mode
{
silent
} else {
true
};
let webview2_install_mode = if updater {
WebviewInstallMode::DownloadBootstrapper {
silent: silent_webview2_install,
}
} else {
let mut webview_install_mode = settings.windows().webview_install_mode.clone();
if let Some(fixed_runtime_path) = settings.windows().webview_fixed_runtime_path.clone() {
webview_install_mode = WebviewInstallMode::FixedRuntime {
path: fixed_runtime_path,
};
} else if let Some(wix) = &settings.windows().wix {
if wix.skip_webview_install {
webview_install_mode = WebviewInstallMode::Skip;
}
}
webview_install_mode
};
let webview2_installer_args = to_json(if silent_webview2_install {
"/silent"
} else {
""
});
data.insert("webview2_installer_args", to_json(webview2_installer_args));
data.insert(
"install_webview2_mode",
to_json(match webview2_install_mode {
WebviewInstallMode::DownloadBootstrapper { silent: _ } => "downloadBootstrapper",
WebviewInstallMode::EmbedBootstrapper { silent: _ } => "embedBootstrapper",
WebviewInstallMode::OfflineInstaller { silent: _ } => "offlineInstaller",
_ => "",
}),
);
match webview2_install_mode {
WebviewInstallMode::EmbedBootstrapper { silent: _ } => {
let webview2_bootstrapper_path = tauri_tools_path.join("MicrosoftEdgeWebview2Setup.exe");
std::fs::write(
&webview2_bootstrapper_path,
download(WEBVIEW2_BOOTSTRAPPER_URL)?,
)?;
data.insert(
"webview2_bootstrapper_path",
to_json(webview2_bootstrapper_path),
);
}
WebviewInstallMode::OfflineInstaller { silent: _ } => {
let guid = if arch == "x64" {
WEBVIEW2_X64_INSTALLER_GUID
} else {
WEBVIEW2_X86_INSTALLER_GUID
};
let offline_installer_path = tauri_tools_path
.join("Webview2OfflineInstaller")
.join(guid)
.join(arch);
create_dir_all(&offline_installer_path)?;
let webview2_installer_path =
offline_installer_path.join("MicrosoftEdgeWebView2RuntimeInstaller.exe");
if !webview2_installer_path.exists() {
std::fs::write(
&webview2_installer_path,
download(
&format!("https://msedge.sf.dl.delivery.mp.microsoft.com/filestreamingservice/files/{}/MicrosoftEdgeWebView2RuntimeInstaller{}.exe",
guid,
arch.to_uppercase(),
),
)?,
)?;
}
data.insert("webview2_installer_path", to_json(webview2_installer_path));
}
_ => {}
}
let mut handlebars = Handlebars::new();
handlebars.register_helper("or", Box::new(handlebars_or));
handlebars.register_helper("association-description", Box::new(association_description));
handlebars.register_escape_fn(|s| {
let mut output = String::new();
for c in s.chars() {
match c {
'\"' => output.push_str("$\\\""),
'$' => output.push_str("$$"),
'`' => output.push_str("$\\`"),
'\n' => output.push_str("$\\n"),
'\t' => output.push_str("$\\t"),
'\r' => output.push_str("$\\r"),
_ => output.push(c),
}
}
output
});
if let Some(path) = custom_template_path {
handlebars
.register_template_string("installer.nsi", std::fs::read_to_string(path)?)
.map_err(|e| e.to_string())
.expect("Failed to setup custom handlebar template");
} else {
handlebars
.register_template_string("installer.nsi", include_str!("./templates/installer.nsi"))
.map_err(|e| e.to_string())
.expect("Failed to setup handlebar template");
}
write_ut16_le_with_bom(
&output_path.join("FileAssociation.nsh"),
include_str!("./templates/FileAssociation.nsh"),
)?;
let installer_nsi_path = output_path.join("installer.nsi");
write_ut16_le_with_bom(
&installer_nsi_path,
handlebars.render("installer.nsi", &data)?.as_str(),
)?;
for (lang, data) in languages_data.iter() {
if let Some(content) = data {
write_ut16_le_with_bom(output_path.join(lang).with_extension("nsh"), content)?;
}
}
let package_base_name = format!(
"{}_{}_{}-setup",
main_binary.name().replace(".exe", ""),
settings.version_string(),
arch,
);
let nsis_output_path = output_path.join(out_file);
let nsis_installer_path = settings.project_out_directory().to_path_buf().join(format!(
"bundle/{}/{}.exe",
if updater {
NSIS_UPDATER_OUTPUT_FOLDER_NAME
} else {
NSIS_OUTPUT_FOLDER_NAME
},
package_base_name
));
create_dir_all(nsis_installer_path.parent().unwrap())?;
info!(action = "Running"; "makensis.exe to produce {}", display_path(&nsis_installer_path));
#[cfg(target_os = "windows")]
let mut nsis_cmd = Command::new(_nsis_toolset_path.join("makensis.exe"));
#[cfg(not(target_os = "windows"))]
let mut nsis_cmd = Command::new("makensis");
nsis_cmd
.arg(match settings.log_level() {
log::Level::Error => "-V1",
log::Level::Warn => "-V2",
log::Level::Info => "-V3",
_ => "-V4",
})
.arg(installer_nsi_path)
.current_dir(output_path)
.piped()
.context("error running makensis.exe")?;
rename(nsis_output_path, &nsis_installer_path)?;
// Code signing is currently only supported on Windows hosts
#[cfg(target_os = "windows")]
try_sign(&nsis_installer_path, settings)?;
Ok(vec![nsis_installer_path])
}
fn handlebars_or(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let param1 = h.param(0).unwrap().render();
let param2 = h.param(1).unwrap();
out.write(&if param1.is_empty() {
param2.render()
} else {
param1
})?;
Ok(())
}
fn association_description(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let description = h.param(0).unwrap().render();
let ext = h.param(1).unwrap();
out.write(&if description.is_empty() {
format!("{} File", ext.render().to_uppercase())
} else {
description
})?;
Ok(())
}
/// BTreeMap<OriginalPath, (ParentOfTargetPath, TargetPath)>
type ResourcesMap = BTreeMap<PathBuf, (String, PathBuf)>;
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourcesMap> {
let mut resources = ResourcesMap::new();
let cwd = std::env::current_dir()?;
let mut added_resources = Vec::new();
for src in settings.resource_files() {
let src = src?;
let resource_path = dunce::canonicalize(cwd.join(&src))?;
// In some glob resource paths like `assets/**/*` a file might appear twice
// because the `tauri_utils::resources::ResourcePaths` iterator also reads a directory
// when it finds one. So we must check it before processing the file.
if added_resources.contains(&resource_path) {
continue;
}
added_resources.push(resource_path.clone());
let target_path = resource_relpath(&src);
resources.insert(
resource_path,
(
target_path
.parent()
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_default(),
target_path,
),
);
}
Ok(resources)
}
/// BTreeMap<OriginalPath, TargetFileName>
type BinariesMap = BTreeMap<PathBuf, String>;
fn generate_binaries_data(settings: &Settings) -> crate::Result<BinariesMap> {
let mut binaries = BinariesMap::new();
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let binary_path = dunce::canonicalize(cwd.join(&src))?;
let dest_filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{}", settings.target()), "");
binaries.insert(binary_path, dest_filename);
}
for bin in settings.binaries() {
if !bin.main() {
let bin_path = settings.binary_path(bin);
binaries.insert(
bin_path.clone(),
bin_path
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.to_string(),
);
}
}
Ok(binaries)
}
fn get_lang_data(
lang: &str,
custom_lang_files: Option<&HashMap<String, PathBuf>>,
) -> crate::Result<Option<(PathBuf, Option<&'static str>)>> {
if let Some(path) = custom_lang_files.and_then(|h| h.get(lang)) {
return Ok(Some((dunce::canonicalize(path)?, None)));
}
let lang_path = PathBuf::from(format!("{lang}.nsh"));
let lang_content = match lang.to_lowercase().as_str() {
"arabic" => Some(include_str!("./templates/nsis-languages/Arabic.nsh")),
"bulgarian" => Some(include_str!("./templates/nsis-languages/Bulgarian.nsh")),
"dutch" => Some(include_str!("./templates/nsis-languages/Dutch.nsh")),
"english" => Some(include_str!("./templates/nsis-languages/English.nsh")),
"japanese" => Some(include_str!("./templates/nsis-languages/Japanese.nsh")),
"korean" => Some(include_str!("./templates/nsis-languages/Korean.nsh")),
"portuguesebr" => Some(include_str!("./templates/nsis-languages/PortugueseBR.nsh")),
"tradchinese" => Some(include_str!("./templates/nsis-languages/TradChinese.nsh")),
"simpchinese" => Some(include_str!("./templates/nsis-languages/SimpChinese.nsh")),
"french" => Some(include_str!("./templates/nsis-languages/French.nsh")),
"spanish" => Some(include_str!("./templates/nsis-languages/Spanish.nsh")),
"spanishinternational" => Some(include_str!(
"./templates/nsis-languages/SpanishInternational.nsh"
)),
"persian" => Some(include_str!("./templates/nsis-languages/Persian.nsh")),
"turkish" => Some(include_str!("./templates/nsis-languages/Turkish.nsh")),
"swedish" => Some(include_str!("./templates/nsis-languages/Swedish.nsh")),
_ => return Ok(None),
};
Ok(Some((lang_path, lang_content)))
}
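// Small illustrative check (not in the original source; assumes the crate's
// error type implements `Debug`): a known language with no custom override
// resolves to a `<Lang>.nsh` path plus the bundled template content, while
// unknown languages yield `None`, which triggers the warning in the caller.
#[cfg(test)]
mod lang_tests {
    use super::get_lang_data;

    #[test]
    fn known_language_resolves_to_bundled_template() {
        let (path, content) = get_lang_data("English", None).unwrap().unwrap();
        assert_eq!(path.to_string_lossy(), "English.nsh");
        assert!(content.is_some());
        assert!(get_lang_data("Klingon", None).unwrap().is_none());
    }
}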
fn write_ut16_le_with_bom<P: AsRef<Path>>(path: P, content: &str) -> crate::Result<()> {
use std::fs::File;
use std::io::{BufWriter, Write};
let file = File::create(path)?;
let mut output = BufWriter::new(file);
output.write_all(&[0xFF, 0xFE])?; // the BOM part
for utf16 in content.encode_utf16() {
output.write_all(&utf16.to_le_bytes())?;
}
Ok(())
} | info!("extracting NSIS");
extract_zip(&data, _tauri_tools_path)?; | random_line_split |
nsis.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#[cfg(target_os = "windows")]
use crate::bundle::windows::util::try_sign;
use crate::{
bundle::{
common::CommandExt,
windows::util::{
download, download_and_verify, extract_zip, HashAlgorithm, NSIS_OUTPUT_FOLDER_NAME,
NSIS_UPDATER_OUTPUT_FOLDER_NAME, WEBVIEW2_BOOTSTRAPPER_URL, WEBVIEW2_X64_INSTALLER_GUID,
WEBVIEW2_X86_INSTALLER_GUID,
},
},
Settings,
};
use tauri_utils::display_path;
use anyhow::Context;
use handlebars::{to_json, Handlebars};
use log::{info, warn};
use tauri_utils::{
config::{NSISInstallerMode, WebviewInstallMode},
resources::resource_relpath,
};
use std::{
collections::{BTreeMap, HashMap},
fs::{copy, create_dir_all, remove_dir_all, rename, write},
path::{Path, PathBuf},
process::Command,
};
// URLs for the NSIS toolchain.
#[cfg(target_os = "windows")]
const NSIS_URL: &str =
"https://github.com/tauri-apps/binary-releases/releases/download/nsis-3/nsis-3.zip";
#[cfg(target_os = "windows")]
const NSIS_SHA1: &str = "057e83c7d82462ec394af76c87d06733605543d4";
const NSIS_APPLICATIONID_URL: &str = "https://github.com/tauri-apps/binary-releases/releases/download/nsis-plugins-v0/NSIS-ApplicationID.zip";
const NSIS_TAURI_UTILS: &str =
"https://github.com/tauri-apps/nsis-tauri-utils/releases/download/nsis_tauri_utils-v0.1.1/nsis_tauri_utils.dll";
const NSIS_TAURI_UTILS_SHA1: &str = "A21C67CF5AB6D4274AFFF0D68CFCE680D213DDC7";
#[cfg(target_os = "windows")]
const NSIS_REQUIRED_FILES: &[&str] = &[
"makensis.exe",
"Bin/makensis.exe",
"Stubs/lzma-x86-unicode",
"Stubs/lzma_solid-x86-unicode",
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
"Include/MUI2.nsh",
"Include/FileFunc.nsh",
"Include/x64.nsh",
"Include/nsDialogs.nsh",
"Include/WinMessages.nsh",
];
#[cfg(not(target_os = "windows"))]
const NSIS_REQUIRED_FILES: &[&str] = &[
"Plugins/x86-unicode/ApplicationID.dll",
"Plugins/x86-unicode/nsis_tauri_utils.dll",
];
/// Runs all of the commands to build the NSIS installer.
/// Returns a vector of PathBuf that shows where the NSIS installer was created.
pub fn bundle_project(settings: &Settings, updater: bool) -> crate::Result<Vec<PathBuf>> {
let tauri_tools_path = dirs_next::cache_dir().unwrap().join("tauri");
let nsis_toolset_path = tauri_tools_path.join("NSIS");
if !nsis_toolset_path.exists() {
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
} else if NSIS_REQUIRED_FILES
.iter()
.any(|p| !nsis_toolset_path.join(p).exists())
{
warn!("NSIS directory is missing some files. Recreating it.");
std::fs::remove_dir_all(&nsis_toolset_path)?;
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
}
build_nsis_app_installer(settings, &nsis_toolset_path, &tauri_tools_path, updater)
}
// Gets NSIS and verifies the download via Sha1
fn get_and_extract_nsis(nsis_toolset_path: &Path, _tauri_tools_path: &Path) -> crate::Result<()> {
info!("Verifying NSIS package");
#[cfg(target_os = "windows")]
{
let data = download_and_verify(NSIS_URL, NSIS_SHA1, HashAlgorithm::Sha1)?;
info!("extracting NSIS");
extract_zip(&data, _tauri_tools_path)?;
rename(_tauri_tools_path.join("nsis-3.08"), nsis_toolset_path)?;
}
let nsis_plugins = nsis_toolset_path.join("Plugins");
let data = download(NSIS_APPLICATIONID_URL)?;
info!("extracting NSIS ApplicationID plugin");
extract_zip(&data, &nsis_plugins)?;
create_dir_all(nsis_plugins.join("x86-unicode"))?;
copy(
nsis_plugins
.join("ReleaseUnicode")
.join("ApplicationID.dll"),
nsis_plugins.join("x86-unicode").join("ApplicationID.dll"),
)?;
let data = download_and_verify(NSIS_TAURI_UTILS, NSIS_TAURI_UTILS_SHA1, HashAlgorithm::Sha1)?;
write(
nsis_plugins
.join("x86-unicode")
.join("nsis_tauri_utils.dll"),
data,
)?;
Ok(())
}
fn add_build_number_if_needed(version_str: &str) -> anyhow::Result<String> {
let version = semver::Version::parse(version_str).context("invalid app version")?;
if !version.build.is_empty() {
let build = version.build.parse::<u64>();
if build.is_ok() {
return Ok(format!(
"{}.{}.{}.{}",
version.major, version.minor, version.patch, version.build
));
} else {
anyhow::bail!("optional build metadata in app version must be numeric-only");
}
}
Ok(format!(
"{}.{}.{}.0",
version.major, version.minor, version.patch,
))
}
fn build_nsis_app_installer(
settings: &Settings,
_nsis_toolset_path: &Path,
tauri_tools_path: &Path,
updater: bool,
) -> crate::Result<Vec<PathBuf>> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
"aarch64" => "arm64",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
info!("Target: {}", arch);
#[cfg(target_os = "windows")]
{
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary);
try_sign(&app_exe_source, settings)?;
}
#[cfg(not(target_os = "windows"))]
info!("Code signing is currently only supported on Windows hosts, skipping...");
let output_path = settings.project_out_directory().join("nsis").join(arch);
if output_path.exists() {
remove_dir_all(&output_path)?;
}
create_dir_all(&output_path)?;
let mut data = BTreeMap::new();
let bundle_id = settings.bundle_identifier();
let manufacturer = settings
.publisher()
.unwrap_or_else(|| bundle_id.split('.').nth(1).unwrap_or(bundle_id));
#[cfg(not(target_os = "windows"))]
{
let mut dir = dirs_next::cache_dir().unwrap();
dir.extend(["tauri", "NSIS", "Plugins", "x86-unicode"]);
data.insert("additional_plugins_path", to_json(dir));
}
data.insert("arch", to_json(arch));
data.insert("bundle_id", to_json(bundle_id));
data.insert("manufacturer", to_json(manufacturer));
data.insert("product_name", to_json(settings.product_name()));
data.insert("short_description", to_json(settings.short_description()));
data.insert("copyright", to_json(settings.copyright_string()));
let version = settings.version_string();
data.insert("version", to_json(version));
data.insert(
"version_with_build",
to_json(add_build_number_if_needed(version)?),
);
data.insert(
"allow_downgrades",
to_json(settings.windows().allow_downgrades),
);
let mut install_mode = NSISInstallerMode::CurrentUser;
let mut languages = vec!["English".into()];
let mut custom_template_path = None;
let mut custom_language_files = None;
if let Some(nsis) = &settings.windows().nsis {
custom_template_path = nsis.template.clone();
custom_language_files = nsis.custom_language_files.clone();
install_mode = nsis.install_mode;
if let Some(langs) = &nsis.languages {
languages.clear();
languages.extend_from_slice(langs);
}
if let Some(license) = &nsis.license {
data.insert("license", to_json(dunce::canonicalize(license)?));
}
if let Some(installer_icon) = &nsis.installer_icon {
data.insert(
"installer_icon",
to_json(dunce::canonicalize(installer_icon)?),
);
}
if let Some(header_image) = &nsis.header_image {
data.insert("header_image", to_json(dunce::canonicalize(header_image)?));
}
if let Some(sidebar_image) = &nsis.sidebar_image {
data.insert(
"sidebar_image",
to_json(dunce::canonicalize(sidebar_image)?),
);
}
data.insert(
"display_language_selector",
to_json(nsis.display_language_selector && languages.len() > 1),
);
}
data.insert(
"install_mode",
to_json(match install_mode {
NSISInstallerMode::CurrentUser => "currentUser",
NSISInstallerMode::PerMachine => "perMachine",
NSISInstallerMode::Both => "both",
}),
);
let mut languages_data = Vec::new();
for lang in &languages {
if let Some(data) = get_lang_data(lang, custom_language_files.as_ref())? {
languages_data.push(data);
} else {
log::warn!("Custom tauri messages for {lang} are not translated.\nIf it is a valid language listed on <https://github.com/kichik/nsis/tree/9465c08046f00ccb6eda985abbdbf52c275c6c4d/Contrib/Language%20files>, please open a Tauri feature request\n or you can provide a custom language file for it in `tauri.conf.json > tauri > bundle > windows > nsis > custom_language_files`");
}
}
data.insert("languages", to_json(languages.clone()));
data.insert(
"language_files",
to_json(
languages_data
.iter()
.map(|d| d.0.clone())
.collect::<Vec<_>>(),
),
);
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
data.insert(
"main_binary_name",
to_json(main_binary.name().replace(".exe", "")),
);
data.insert(
"main_binary_path",
to_json(settings.binary_path(main_binary).with_extension("exe")),
);
let out_file = "nsis-output.exe";
data.insert("out_file", to_json(out_file));
let resources = generate_resource_data(settings)?;
data.insert("resources", to_json(resources));
let binaries = generate_binaries_data(settings)?;
data.insert("binaries", to_json(binaries));
if let Some(file_associations) = &settings.file_associations() {
data.insert("file_associations", to_json(file_associations));
}
let silent_webview2_install = if let WebviewInstallMode::DownloadBootstrapper { silent }
| WebviewInstallMode::EmbedBootstrapper { silent }
| WebviewInstallMode::OfflineInstaller { silent } =
settings.windows().webview_install_mode
{
silent
} else {
true
};
let webview2_install_mode = if updater {
WebviewInstallMode::DownloadBootstrapper {
silent: silent_webview2_install,
}
} else {
let mut webview_install_mode = settings.windows().webview_install_mode.clone();
if let Some(fixed_runtime_path) = settings.windows().webview_fixed_runtime_path.clone() {
webview_install_mode = WebviewInstallMode::FixedRuntime {
path: fixed_runtime_path,
};
} else if let Some(wix) = &settings.windows().wix {
if wix.skip_webview_install {
webview_install_mode = WebviewInstallMode::Skip;
}
}
webview_install_mode
};
let webview2_installer_args = to_json(if silent_webview2_install {
"/silent"
} else {
""
});
data.insert("webview2_installer_args", to_json(webview2_installer_args));
data.insert(
"install_webview2_mode",
to_json(match webview2_install_mode {
WebviewInstallMode::DownloadBootstrapper { silent: _ } => "downloadBootstrapper",
WebviewInstallMode::EmbedBootstrapper { silent: _ } => "embedBootstrapper",
WebviewInstallMode::OfflineInstaller { silent: _ } => "offlineInstaller",
_ => "",
}),
);
match webview2_install_mode {
WebviewInstallMode::EmbedBootstrapper { silent: _ } => {
let webview2_bootstrapper_path = tauri_tools_path.join("MicrosoftEdgeWebview2Setup.exe");
std::fs::write(
&webview2_bootstrapper_path,
download(WEBVIEW2_BOOTSTRAPPER_URL)?,
)?;
data.insert(
"webview2_bootstrapper_path",
to_json(webview2_bootstrapper_path),
);
}
WebviewInstallMode::OfflineInstaller { silent: _ } => {
let guid = if arch == "x64" {
WEBVIEW2_X64_INSTALLER_GUID
} else {
WEBVIEW2_X86_INSTALLER_GUID
};
let offline_installer_path = tauri_tools_path
.join("Webview2OfflineInstaller")
.join(guid)
.join(arch);
create_dir_all(&offline_installer_path)?;
let webview2_installer_path =
offline_installer_path.join("MicrosoftEdgeWebView2RuntimeInstaller.exe");
if !webview2_installer_path.exists() {
std::fs::write(
&webview2_installer_path,
download(
&format!("https://msedge.sf.dl.delivery.mp.microsoft.com/filestreamingservice/files/{}/MicrosoftEdgeWebView2RuntimeInstaller{}.exe",
guid,
arch.to_uppercase(),
),
)?,
)?;
}
data.insert("webview2_installer_path", to_json(webview2_installer_path));
}
_ => {}
}
let mut handlebars = Handlebars::new();
handlebars.register_helper("or", Box::new(handlebars_or));
handlebars.register_helper("association-description", Box::new(association_description));
handlebars.register_escape_fn(|s| {
let mut output = String::new();
for c in s.chars() {
match c {
'\"' => output.push_str("$\\\""),
'$' => output.push_str("$$"),
'`' => output.push_str("$\\`"),
'\n' => output.push_str("$\\n"),
'\t' => output.push_str("$\\t"),
'\r' => output.push_str("$\\r"),
_ => output.push(c),
}
}
output
});
if let Some(path) = custom_template_path {
handlebars
.register_template_string("installer.nsi", std::fs::read_to_string(path)?)
.map_err(|e| e.to_string())
.expect("Failed to setup custom handlebar template");
} else {
handlebars
.register_template_string("installer.nsi", include_str!("./templates/installer.nsi"))
.map_err(|e| e.to_string())
.expect("Failed to setup handlebar template");
}
write_ut16_le_with_bom(
&output_path.join("FileAssociation.nsh"),
include_str!("./templates/FileAssociation.nsh"),
)?;
let installer_nsi_path = output_path.join("installer.nsi");
write_ut16_le_with_bom(
&installer_nsi_path,
handlebars.render("installer.nsi", &data)?.as_str(),
)?;
for (lang, data) in languages_data.iter() {
if let Some(content) = data {
write_ut16_le_with_bom(output_path.join(lang).with_extension("nsh"), content)?;
}
}
let package_base_name = format!(
"{}_{}_{}-setup",
main_binary.name().replace(".exe", ""),
settings.version_string(),
arch,
);
let nsis_output_path = output_path.join(out_file);
let nsis_installer_path = settings.project_out_directory().to_path_buf().join(format!(
"bundle/{}/{}.exe",
if updater {
NSIS_UPDATER_OUTPUT_FOLDER_NAME
} else {
NSIS_OUTPUT_FOLDER_NAME
},
package_base_name
));
create_dir_all(nsis_installer_path.parent().unwrap())?;
info!(action = "Running"; "makensis.exe to produce {}", display_path(&nsis_installer_path));
#[cfg(target_os = "windows")]
let mut nsis_cmd = Command::new(_nsis_toolset_path.join("makensis.exe"));
#[cfg(not(target_os = "windows"))]
let mut nsis_cmd = Command::new("makensis");
nsis_cmd
.arg(match settings.log_level() {
log::Level::Error => "-V1",
log::Level::Warn => "-V2",
log::Level::Info => "-V3",
_ => "-V4",
})
.arg(installer_nsi_path)
.current_dir(output_path)
.piped()
.context("error running makensis.exe")?;
rename(nsis_output_path, &nsis_installer_path)?;
// Code signing is currently only supported on Windows hosts
#[cfg(target_os = "windows")]
try_sign(&nsis_installer_path, settings)?;
Ok(vec![nsis_installer_path])
}
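// Standalone sketch of the NSIS string escaping registered in the handlebars
// escape function above. The rules are copied here purely for illustration
// (this test module is not part of the original source): quotes, `$`, backticks
// and control characters must be written in NSIS's `$\` escape syntax.
#[cfg(test)]
mod nsis_escape_tests {
    fn escape(s: &str) -> String {
        let mut output = String::new();
        for c in s.chars() {
            match c {
                '\"' => output.push_str("$\\\""),
                '$' => output.push_str("$$"),
                '`' => output.push_str("$\\`"),
                '\n' => output.push_str("$\\n"),
                '\t' => output.push_str("$\\t"),
                '\r' => output.push_str("$\\r"),
                _ => output.push(c),
            }
        }
        output
    }

    #[test]
    fn escapes_nsis_specials() {
        assert_eq!(escape("price: $5 \"now\""), "price: $$5 $\\\"now$\\\"");
    }
}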
fn handlebars_or(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let param1 = h.param(0).unwrap().render();
let param2 = h.param(1).unwrap();
out.write(&if param1.is_empty() {
param2.render()
} else {
param1
})?;
Ok(())
}
fn association_description(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let description = h.param(0).unwrap().render();
let ext = h.param(1).unwrap();
out.write(&if description.is_empty() {
format!("{} File", ext.render().to_uppercase())
} else {
description
})?;
Ok(())
}
/// BTreeMap<OriginalPath, (ParentOfTargetPath, TargetPath)>
type ResourcesMap = BTreeMap<PathBuf, (String, PathBuf)>;
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourcesMap> {
let mut resources = ResourcesMap::new();
let cwd = std::env::current_dir()?;
let mut added_resources = Vec::new();
for src in settings.resource_files() {
let src = src?;
let resource_path = dunce::canonicalize(cwd.join(&src))?;
// In some glob resource paths like `assets/**/*` a file might appear twice
// because the `tauri_utils::resources::ResourcePaths` iterator also reads a directory
// when it finds one. So we must check it before processing the file.
if added_resources.contains(&resource_path) {
continue;
}
added_resources.push(resource_path.clone());
let target_path = resource_relpath(&src);
resources.insert(
resource_path,
(
target_path
.parent()
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_default(),
target_path,
),
);
}
Ok(resources)
}
/// BTreeMap<OriginalPath, TargetFileName>
type BinariesMap = BTreeMap<PathBuf, String>;
fn generate_binaries_data(settings: &Settings) -> crate::Result<BinariesMap> {
let mut binaries = BinariesMap::new();
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let binary_path = dunce::canonicalize(cwd.join(&src))?;
let dest_filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{}", settings.target()), "");
binaries.insert(binary_path, dest_filename);
}
for bin in settings.binaries() {
if !bin.main() {
let bin_path = settings.binary_path(bin);
binaries.insert(
bin_path.clone(),
bin_path
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.to_string(),
);
}
}
Ok(binaries)
}
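// Illustrative check (hypothetical file names, not from the original source) of
// the target-triple stripping used above for external binaries: the `-<target>`
// suffix is removed so the installed sidecar keeps its plain name.
#[cfg(test)]
mod sidecar_name_tests {
    #[test]
    fn strips_target_triple_suffix() {
        let target = "x86_64-pc-windows-msvc";
        let name = "app-x86_64-pc-windows-msvc.exe".replace(&format!("-{}", target), "");
        assert_eq!(name, "app.exe");
    }
}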
fn get_lang_data(
lang: &str,
custom_lang_files: Option<&HashMap<String, PathBuf>>,
) -> crate::Result<Option<(PathBuf, Option<&'static str>)>> | )),
"persian" => Some(include_str!("./templates/nsis-languages/Persian.nsh")),
"turkish" => Some(include_str!("./templates/nsis-languages/Turkish.nsh")),
"swedish" => Some(include_str!("./templates/nsis-languages/Swedish.nsh")),
_ => return Ok(None),
};
Ok(Some((lang_path, lang_content)))
}
fn write_ut16_le_with_bom<P: AsRef<Path>>(path: P, content: &str) -> crate::Result<()> {
use std::fs::File;
use std::io::{BufWriter, Write};
let file = File::create(path)?;
let mut output = BufWriter::new(file);
output.write_all(&[0xFF, 0xFE])?; // the BOM part
for utf16 in content.encode_utf16() {
output.write_all(&utf16.to_le_bytes())?;
}
Ok(())
}
| {
if let Some(path) = custom_lang_files.and_then(|h| h.get(lang)) {
return Ok(Some((dunce::canonicalize(path)?, None)));
}
let lang_path = PathBuf::from(format!("{lang}.nsh"));
let lang_content = match lang.to_lowercase().as_str() {
"arabic" => Some(include_str!("./templates/nsis-languages/Arabic.nsh")),
"bulgarian" => Some(include_str!("./templates/nsis-languages/Bulgarian.nsh")),
"dutch" => Some(include_str!("./templates/nsis-languages/Dutch.nsh")),
"english" => Some(include_str!("./templates/nsis-languages/English.nsh")),
"japanese" => Some(include_str!("./templates/nsis-languages/Japanese.nsh")),
"korean" => Some(include_str!("./templates/nsis-languages/Korean.nsh")),
"portuguesebr" => Some(include_str!("./templates/nsis-languages/PortugueseBR.nsh")),
"tradchinese" => Some(include_str!("./templates/nsis-languages/TradChinese.nsh")),
"simpchinese" => Some(include_str!("./templates/nsis-languages/SimpChinese.nsh")),
"french" => Some(include_str!("./templates/nsis-languages/French.nsh")),
"spanish" => Some(include_str!("./templates/nsis-languages/Spanish.nsh")),
"spanishinternational" => Some(include_str!(
"./templates/nsis-languages/SpanishInternational.nsh" | identifier_body |
mod.rs | //! Abstraction over the multiple versions of the file format that are allowed
//!
//! This module exists because we want to keep properly handling old config file formats, even
//! when they'll no longer be generated by default. In the future, we might provide some kind of feature-gating for
//! older versions, so that the dependencies associated only with them don't inflate compile times.
//!
//! All of the submodules here correspond to a particular version -- with the current one also
//! included. Each submodule defines a `FileContent` struct that implements the [`FileContent`]
//! trait in this module, so that we can cleanly abstract over the file format without changing the
//! logic in `crate::app`. The `FileContent` from the latest version is re-exported in this module
//! as [`CurrentFileContent`].
//!
//! Each submodule additionally defines a couple items:
//! ```ignore
//! // If this file format has been deprecated, a warning to indicate as such
//! pub const WARNING: Option<Warning> =...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent {... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `mark_saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index | /// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
let v = $val;
$(if v.starts_with($str) {
$arm
} else)* {
$else_arm
}
}};
}
prefix_match!(content.as_str() => {
"---\nversion: v0.2\n" => (Box::new(v0_2::parse(content)), v0_2::WARNING),
"---\nversion: v0.3\n" => (Box::new(v0_3::parse(content)), v0_3::WARNING),
"---\nversion: v0.4\n" => (Box::new(v0_4::parse(content)), v0_4::WARNING),
_ => {
eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
exit(1)
},
})
}
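// Illustrative helper (not part of the original module) showing the intended
// call sequence: parse whatever version is on disk, surface any deprecation
// warning, then upgrade to the current format with the user's password.
#[allow(dead_code)]
fn open_and_upgrade(
    path: &Path,
    password: String,
) -> Result<Box<CurrentFileContent>, DecryptError> {
    let (content, warning) = parse(path);
    if let Some(w) = warning {
        eprintln!("note: {}", w.reason);
    }
    // `to_current` converts to the latest version, failing with `DecryptError`
    // if the password cannot unlock the file.
    content.to_current(password)
}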
/// Return type for [`CurrentFileContent::to_plaintext`]
///
/// This is used both to convert between `FileContent` versions *and* to within the
/// `emit-plaintext` and `from-plaintext` subcommands.
#[derive(Serialize, Deserialize)]
pub struct PlaintextContent {
last_update: SystemTime,
entries: Vec<PlaintextEntry>,
}
#[derive(Serialize, Deserialize)]
struct PlaintextEntry {
name: String,
tags: Vec<String>,
fields: Vec<PlaintextField>,
first_added: SystemTime,
last_update: SystemTime,
}
#[derive(Serialize, Deserialize)]
struct PlaintextField {
name: String,
value: PlaintextValue,
}
#[derive(Serialize, Deserialize)]
pub enum PlaintextValue {
Manual { value: String, protected: bool },
Totp { secret: String, issuer: String },
}
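// Round-trip sketch (illustrative only; assumes `serde_json` is available as a
// dev dependency) showing that a `PlaintextValue` keeps its externally tagged
// variant through serialization.
#[cfg(test)]
mod plaintext_value_tests {
    use super::PlaintextValue;

    #[test]
    fn manual_value_round_trips() {
        let v = PlaintextValue::Manual { value: "hunter2".into(), protected: true };
        let json = serde_json::to_string(&v).unwrap();
        let back: PlaintextValue = serde_json::from_str(&json).unwrap();
        match back {
            PlaintextValue::Manual { value, protected } => {
                assert_eq!(value, "hunter2");
                assert!(protected);
            }
            _ => panic!("variant changed during round-trip"),
        }
    }
}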
impl PlaintextContent {
/// Produces a new, empty `PlaintextContent` with the current time as its last update
fn init() -> Self {
PlaintextContent {
last_update: SystemTime::now(),
entries: Vec::new(),
}
}
} | fn remove_entry(&mut self, idx: usize);
}
| random_line_split |
mod.rs | //! Abstraction over the multiple versions of the file format that are allowed
//!
//! This module exists because we want to keep properly handling old config file formats, even
//! when they'll no longer be generated by default. In the future, we might provide some kind of feature-gating for
//! older versions, so that the dependencies associated only with them don't inflate compile times.
//!
//! All of the submodules here correspond to a particular version -- with the current one also
//! included. Each submodule defines a `FileContent` struct that implements the [`FileContent`]
//! trait in this module, so that we can cleanly abstract over the file format without changing the
//! logic in `crate::app`. The `FileContent` from the latest version is re-exported in this module
//! as [`CurrentFileContent`].
//!
//! Each submodule additionally defines a couple items:
//! ```ignore
//! // If this file format has been deprecated, a warning to indicate as such
//! pub const WARNING: Option<Warning> =...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent {... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
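// Tiny illustrative test (not part of the original module): freshly wrapped
// content starts locked (no key) and with no unsaved changes recorded.
#[cfg(test)]
mod keyed_tests {
    use super::Keyed;

    #[test]
    fn new_content_starts_locked_and_saved() {
        let keyed = Keyed::new("placeholder content");
        assert!(keyed.key.is_none());
        assert!(!keyed.unsaved);
    }
}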
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `mark_saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
let v = $val;
$(if v.starts_with($str) {
$arm
} else)* {
$else_arm
}
}};
}
prefix_match!(content.as_str() => {
"---\nversion: v0.2\n" => (Box::new(v0_2::parse(content)), v0_2::WARNING),
"---\nversion: v0.3\n" => (Box::new(v0_3::parse(content)), v0_3::WARNING),
"---\nversion: v0.4\n" => (Box::new(v0_4::parse(content)), v0_4::WARNING),
_ => {
eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
exit(1)
},
})
}
/// Return type for [`CurrentFileContent::to_plaintext`]
///
/// This is used both to convert between `FileContent` versions *and* within the
/// `emit-plaintext` and `from-plaintext` subcommands.
#[derive(Serialize, Deserialize)]
pub struct PlaintextContent {
last_update: SystemTime,
entries: Vec<PlaintextEntry>,
}
#[derive(Serialize, Deserialize)]
struct PlaintextEntry {
name: String,
tags: Vec<String>,
fields: Vec<PlaintextField>,
first_added: SystemTime,
last_update: SystemTime,
}
#[derive(Serialize, Deserialize)]
struct PlaintextField {
name: String,
value: PlaintextValue,
}
#[derive(Serialize, Deserialize)]
pub enum PlaintextValue {
Manual { value: String, protected: bool },
Totp { secret: String, issuer: String },
}
impl PlaintextContent {
/// Produces a new, empty `PlaintextContent` with the current time as its last update
fn init() -> Self |
}
| {
PlaintextContent {
last_update: SystemTime::now(),
entries: Vec::new(),
}
} | identifier_body |
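// --- Illustrative sketch (added for clarity, not from the source) -----------
// `FileContent::to_current` is documented to upgrade one version step at a
// time and delegate the rest. These types and impls are hypothetical; only
// the delegation shape mirrors the real chain (v0.2 -> v0.3 -> v0.4).
struct V2;
struct V3;
struct V4; // stand-in for CurrentFileContent

trait Upgrade {
    fn to_current(self: Box<Self>) -> Box<V4>;
}

impl Upgrade for V2 {
    fn to_current(self: Box<Self>) -> Box<V4> {
        // Convert v0.2 -> v0.3, then let the v0.3 impl finish the climb.
        Box::new(V3).to_current()
    }
}

impl Upgrade for V3 {
    fn to_current(self: Box<Self>) -> Box<V4> {
        Box::new(V4) // latest version: the chain terminates here
    }
}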
mod.rs | //! Abstraction over multiple versions of the file format allowed
//!
//! We want to continue to properly handle old config file formats, even when they'll no
//! longer be generated by default. In the future, we might provide some kind of feature-gating for
//! older versions, so that the dependencies associated only with them don't inflate compile times.
//!
//! All of the submodules here correspond to a particular version -- with the current one also
//! included. Each submodule defines a `FileContent` struct that implements the [`FileContent`]
//! trait in this module, so that we can cleanly abstract over the file format without changing the
//! logic in `crate::app`. The `FileContent` from the latest version is re-exported in this module
//! as [`CurrentFileContent`].
//!
//! Each submodule additionally defines a couple of items:
//! ```ignore
//! // If this file format has been deprecated, a warning to indicate as such
//! pub const WARNING: Option<Warning> = ...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent { ... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `mark_saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => |
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
let v = $val;
$(if v.starts_with($str) {
$arm
} else)* {
$else_arm
}
}};
}
prefix_match!(content.as_str() => {
"---\nversion: v0.2\n" => (Box::new(v0_2::parse(content)), v0_2::WARNING),
"---\nversion: v0.3\n" => (Box::new(v0_3::parse(content)), v0_3::WARNING),
"---\nversion: v0.4\n" => (Box::new(v0_4::parse(content)), v0_4::WARNING),
_ => {
eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
exit(1)
},
})
}
/// Return type for [`CurrentFileContent::to_plaintext`]
///
/// This is used both to convert between `FileContent` versions *and* within the
/// `emit-plaintext` and `from-plaintext` subcommands.
#[derive(Serialize, Deserialize)]
pub struct PlaintextContent {
last_update: SystemTime,
entries: Vec<PlaintextEntry>,
}
#[derive(Serialize, Deserialize)]
struct PlaintextEntry {
name: String,
tags: Vec<String>,
fields: Vec<PlaintextField>,
first_added: SystemTime,
last_update: SystemTime,
}
#[derive(Serialize, Deserialize)]
struct PlaintextField {
name: String,
value: PlaintextValue,
}
#[derive(Serialize, Deserialize)]
pub enum PlaintextValue {
Manual { value: String, protected: bool },
Totp { secret: String, issuer: String },
}
impl PlaintextContent {
/// Produces a new, empty `PlaintextContent` with the current time as its last update
fn init() -> Self {
PlaintextContent {
last_update: SystemTime::now(),
entries: Vec::new(),
}
}
}
| {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
} | conditional_block |
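// --- Illustrative sketch (added for clarity, not from the source) -----------
// `PlaintextContent` derives Serialize/Deserialize, so it round-trips through
// any serde format. This assumes a `serde_json` dependency and placement
// inside this module (the fields are private); the real crate may well use a
// different format.
fn plaintext_roundtrip_demo() -> Result<(), serde_json::Error> {
    let content = PlaintextContent::init(); // empty, stamped with "now"
    let json = serde_json::to_string(&content)?;
    let back: PlaintextContent = serde_json::from_str(&json)?;
    assert_eq!(back.entries.len(), 0);
    Ok(())
}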
mod.rs | //! Abstraction over multiple versions of the file format allowed
//!
//! We want to continue to properly handle old config file formats, even when they'll no
//! longer be generated by default. In the future, we might provide some kind of feature-gating for
//! older versions, so that the dependencies associated only with them don't inflate compile times.
//!
//! All of the submodules here correspond to a particular version -- with the current one also
//! included. Each submodule defines a `FileContent` struct that implements the [`FileContent`]
//! trait in this module, so that we can cleanly abstract over the file format without changing the
//! logic in `crate::app`. The `FileContent` from the latest version is re-exported in this module
//! as [`CurrentFileContent`].
//!
//! Each submodule additionally defines a couple of items:
//! ```ignore
//! // If this file format has been deprecated, a warning to indicate as such
//! pub const WARNING: Option<Warning> = ...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent { ... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `mark_saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum | {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
let v = $val;
$(if v.starts_with($str) {
$arm
} else)* {
$else_arm
}
}};
}
prefix_match!(content.as_str() => {
"---\nversion: v0.2\n" => (Box::new(v0_2::parse(content)), v0_2::WARNING),
"---\nversion: v0.3\n" => (Box::new(v0_3::parse(content)), v0_3::WARNING),
"---\nversion: v0.4\n" => (Box::new(v0_4::parse(content)), v0_4::WARNING),
_ => {
eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
exit(1)
},
})
}
/// Return type for [`CurrentFileContent::to_plaintext`]
///
/// This is used both to convert between `FileContent` versions *and* within the
/// `emit-plaintext` and `from-plaintext` subcommands.
#[derive(Serialize, Deserialize)]
pub struct PlaintextContent {
last_update: SystemTime,
entries: Vec<PlaintextEntry>,
}
#[derive(Serialize, Deserialize)]
struct PlaintextEntry {
name: String,
tags: Vec<String>,
fields: Vec<PlaintextField>,
first_added: SystemTime,
last_update: SystemTime,
}
#[derive(Serialize, Deserialize)]
struct PlaintextField {
name: String,
value: PlaintextValue,
}
#[derive(Serialize, Deserialize)]
pub enum PlaintextValue {
Manual { value: String, protected: bool },
Totp { secret: String, issuer: String },
}
impl PlaintextContent {
/// Produces a new, empty `PlaintextContent` with the current time as its last update
fn init() -> Self {
PlaintextContent {
last_update: SystemTime::now(),
entries: Vec::new(),
}
}
}
| ValueKind | identifier_name |
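// --- Illustrative sketch (added for clarity, not from the source) -----------
// Intended `FieldBuilder` flow against any `dyn EntryMut`: obtain a builder,
// pick the field kind before setting its value (the docs say `set_value`
// panics otherwise), then hand the builder back via `set_field`. The field
// name and value below are illustrative.
fn add_password_field(entry: &mut dyn EntryMut) -> Result<(), SetFieldError> {
    let mut builder = entry.field_builder();
    builder.make_manual(); // must precede set_value for a Manual value
    builder.set_name("password".to_string());
    builder.set_value(PlaintextValue::Manual {
        value: "hunter2".to_string(),
        protected: true,
    });
    let idx = entry.num_fields(); // index == num_fields appends
    entry.set_field(idx, builder)
}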
main.rs | extern crate petgraph;
extern crate rand;
extern crate time;
extern crate clap;
use std::cmp::{max, min};
use std::collections::HashSet;
use rand::Rng;
use time::PreciseTime;
enum Method {
Any,
All,
}
fn main() {
let matches = clap::App::new("square-sum")
.about("Calculates solutions to the square sum problem")
.author("Matt Williams")
.arg(clap::Arg::with_name("start")
.short("s")
.long("start")
.value_name("N")
.default_value("0")
.help("The start of the sequence to calculate"))
.arg(clap::Arg::with_name("end")
.short("e")
.long("end")
.value_name("N")
.default_value("100")
.help("The end of the sequence to calculate (exclusive)"))
.arg(clap::Arg::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn | (
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
}
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount;
self.path.truncate(new_size);
}
fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == 0 {
path.backtrack(backtrack_amount);
}
// Current vertex is `v`
let v = *path.path
.last()
.ok_or("There should be at least one node in the path")?;
// Create list of possible next vertices
let possible_next_nodes: Vec<_> = g.neighbors(v.into())
.filter(|n| !path.contains(n.index()))
.collect();
// If there are any, choose one randomly and add it to the path
if let Some(v) = rng.choose(&possible_next_nodes) {
path.push(v.index());
} else {
// No unvisited neighbours; if this is a new longest path, record it in `longest_path`
if path.len() > longest_path.len() {
longest_path = path.path.clone();
}
// choose any neighbour, `n`, of `v` (which must already be in `path`) and reverse path from `n` (not including n) to `v`
let previous_node = path.path[path.len() - 2];
let possible_pivots: Vec<_> = g.neighbors(v.into())
.filter(|n| n.index() != previous_node)
.collect();
if let Some(pivot) = rng.choose(&possible_pivots) {
let pivot_pos = path.iter()
.position(|&v| v == pivot.index())
.ok_or("Pivot must be in the path")?;
path.path[pivot_pos + 1..].reverse();
}
}
// If we've found all nodes, return
if path.len() == g.node_count() {
return Ok(path.iter().map(|&a| a + 1).collect());
}
// If we've 'timed out', fail
if resets * reset_rate > max_iterations {
return Err("Timeout");
}
iteration += 1;
}
}
fn check_sum_squares(vals: &[usize]) -> bool {
let s: Vec<usize> = squares()
.take_while(|&x| x <= (vals.len() * 2) - 1)
.collect();
vals.iter()
.zip(vals.iter().skip(1))
.all(|(&a, &b)| s.contains(&(a + b)))
}
| add_square_sum_node | identifier_name |
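// --- Illustrative sketch (added for clarity, not from the source) -----------
// The edge rule in `add_square_sum_node`: when node i is added, it gains an
// edge to j = sq - i for every perfect square sq with i < sq <= 2i - 1, i.e.
// every j < i such that i + j is a perfect square. Each edge is therefore
// created exactly once, by its larger endpoint. Hypothetical helper:
fn smaller_square_sum_neighbours(i: usize) -> Vec<usize> {
    (1usize..)
        .map(|k| k * k)
        .skip_while(|&sq| sq <= i)
        .take_while(|&sq| sq <= 2 * i - 1)
        .map(|sq| sq - i)
        .collect()
}
// e.g. smaller_square_sum_neighbours(8) == vec![1]      (8 + 1 == 9)
//      smaller_square_sum_neighbours(13) == vec![3, 12] (16 and 25)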
main.rs | extern crate petgraph;
extern crate rand;
extern crate time;
extern crate clap;
use std::cmp::{max, min};
use std::collections::HashSet;
use rand::Rng;
use time::PreciseTime;
enum Method {
Any,
All,
}
fn main() {
let matches = clap::App::new("square-sum")
.about("Calculates solutions to the square sum problem")
.author("Matt Williams")
.arg(clap::Arg::with_name("start")
.short("s")
.long("start")
.value_name("N")
.default_value("0")
.help("The start of the sequence to calculate"))
.arg(clap::Arg::with_name("end")
.short("e")
.long("end")
.value_name("N")
.default_value("100")
.help("The end of the sequence to calculate (exclusive)"))
.arg(clap::Arg::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn add_square_sum_node(
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path |
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount;
self.path.truncate(new_size);
}
fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == 0 {
path.backtrack(backtrack_amount);
}
// Current vertex is `v`
let v = *path.path
.last()
.ok_or("There should be at least one node in the path")?;
// Create list of possible next vertices
let possible_next_nodes: Vec<_> = g.neighbors(v.into())
.filter(|n| !path.contains(n.index()))
.collect();
// If there are any, choose one randomly and add it to the path
if let Some(v) = rng.choose(&possible_next_nodes) {
path.push(v.index());
} else {
// No unvisited neighbours; if this is a new longest path, record it in `longest_path`
if path.len() > longest_path.len() {
longest_path = path.path.clone();
}
// choose any neighbour, `n`, of `v` (which must already be in `path`) and reverse path from `n` (not including n) to `v`
let previous_node = path.path[path.len() - 2];
let possible_pivots: Vec<_> = g.neighbors(v.into())
.filter(|n| n.index() != previous_node)
.collect();
if let Some(pivot) = rng.choose(&possible_pivots) {
let pivot_pos = path.iter()
.position(|&v| v == pivot.index())
.ok_or("Pivot must be in the path")?;
path.path[pivot_pos + 1..].reverse();
}
}
// If we've found all nodes, return
if path.len() == g.node_count() {
return Ok(path.iter().map(|&a| a + 1).collect());
}
// If we've 'timed out', fail
if resets * reset_rate > max_iterations {
return Err("Timeout");
}
iteration += 1;
}
}
fn check_sum_squares(vals: &[usize]) -> bool {
let s: Vec<usize> = squares()
.take_while(|&x| x <= (vals.len() * 2) - 1)
.collect();
vals.iter()
.zip(vals.iter().skip(1))
.all(|(&a, &b)| s.contains(&(a + b)))
}
| {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
} | identifier_body |
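// --- Illustrative sketch (added for clarity, not from the source) -----------
// `Path` keeps `member[i]` in sync with `path` so that `contains` is O(1),
// and `backtrack` never shrinks the path below two nodes. A quick check:
fn path_demo() {
    let mut p = Path::new(8);
    for i in 0..5 {
        p.push(i);
    }
    assert_eq!(p.len(), 5);
    assert!(p.contains(4));
    p.backtrack(3); // drops nodes 2, 3, 4; membership flags follow
    assert_eq!(p.len(), 2);
    assert!(!p.contains(4));
}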
main.rs | extern crate petgraph;
extern crate rand;
extern crate time;
extern crate clap;
use std::cmp::{max, min};
use std::collections::HashSet;
use rand::Rng;
use time::PreciseTime;
enum Method {
Any,
All,
}
fn main() {
let matches = clap::App::new("square-sum")
.about("Calculates solutions to the square sum problem")
.author("Matt Williams")
.arg(clap::Arg::with_name("start")
.short("s")
.long("start")
.value_name("N")
.default_value("0")
.help("The start of the sequence to calculate"))
.arg(clap::Arg::with_name("end")
.short("e")
.long("end")
.value_name("N")
.default_value("100")
.help("The end of the sequence to calculate (exclusive)"))
.arg(clap::Arg::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn add_square_sum_node(
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
}
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount; | fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == 0 {
path.backtrack(backtrack_amount);
}
// Current vertex is `v`
let v = *path.path
.last()
.ok_or("There should be at least one node in the path")?;
// Create list of possible next vertices
let possible_next_nodes: Vec<_> = g.neighbors(v.into())
.filter(|n| !path.contains(n.index()))
.collect();
// If there are any, choose one randomly and add it to the path
if let Some(v) = rng.choose(&possible_next_nodes) {
path.push(v.index());
} else {
// No unvisited neighbours; if this is a new longest path, record it in `longest_path`
if path.len() > longest_path.len() {
longest_path = path.path.clone();
}
// choose any neighbour, `n`, of `v` (which must already be in `path`) and reverse path from `n` (not including n) to `v`
let previous_node = path.path[path.len() - 2];
let possible_pivots: Vec<_> = g.neighbors(v.into())
.filter(|n| n.index() != previous_node)
.collect();
if let Some(pivot) = rng.choose(&possible_pivots) {
let pivot_pos = path.iter()
.position(|&v| v == pivot.index())
.ok_or("Pivot must be in the path")?;
path.path[pivot_pos + 1..].reverse();
}
}
// If we've found all nodes, return
if path.len() == g.node_count() {
return Ok(path.iter().map(|&a| a + 1).collect());
}
// If we've 'timed out', fail
if resets * reset_rate > max_iterations {
return Err("Timeout");
}
iteration += 1;
}
}
fn check_sum_squares(vals: &[usize]) -> bool {
let s: Vec<usize> = squares()
.take_while(|&x| x <= (vals.len() * 2) - 1)
.collect();
vals.iter()
.zip(vals.iter().skip(1))
.all(|(&a, &b)| s.contains(&(a + b)))
} | self.path.truncate(new_size);
}
| random_line_split |
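// --- Illustrative sketch (added for clarity, not from the source) -----------
// `check_sum_squares` validating a known square-sum ordering of 1..=15:
// every adjacent pair sums to 9, 16, or 25.
fn square_sum_15_demo() {
    let seq: [usize; 15] = [8, 1, 15, 10, 6, 3, 13, 12, 4, 5, 11, 14, 2, 7, 9];
    assert!(check_sum_squares(&seq));
}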
lib.rs | mod utils;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{ImageData, WebGlProgram, WebGlRenderingContext, WebGlShader};
const WIDTH: i32 = 128;
const HEIGHT: i32 = 128;
const CHANNELS: i32 = 4;
const BUFFER_SIZE: usize = ((WIDTH * HEIGHT) * CHANNELS) as usize;
static mut PIXEL_DATA: [u8; BUFFER_SIZE] = [255; BUFFER_SIZE];
static mut PIXEL_DATA_UPDATING: bool = false;
static mut PIXEL_DATA_UPDATED: bool = false;
// static mut CLIENT_READY: bool = false;
const VERTICES: [f32; 18] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottem right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottem right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
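// Mipmaps are generated because TEXTURE_MIN_FILTER is left at its WebGL
// default (NEAREST_MIPMAP_LINEAR), which samples mip levels.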
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
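// Standard wasm-bindgen pattern for a self-rescheduling requestAnimationFrame
// loop: `g` keeps the closure alive while the closure re-registers itself
// through `f` on every frame.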
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: &str,
) -> Result<WebGlShader, String> {
let shader = context
.create_shader(shader_type)
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.shader_source(&shader, source);
context.compile_shader(&shader);
if context
.get_shader_parameter(&shader, WebGlRenderingContext::COMPILE_STATUS)
.as_bool()
.unwrap_or(false)
| else {
Err(context
.get_shader_info_log(&shader)
.unwrap_or_else(|| String::from("Unknown error creating shader")))
}
}
pub fn link_program(
context: &WebGlRenderingContext,
vert_shader: &WebGlShader,
frag_shader: &WebGlShader,
) -> Result<WebGlProgram, String> {
let program = context
.create_program()
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.attach_shader(&program, vert_shader);
context.attach_shader(&program, frag_shader);
context.link_program(&program);
if context
.get_program_parameter(&program, WebGlRenderingContext::LINK_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(program)
} else {
Err(context
.get_program_info_log(&program)
.unwrap_or_else(|| String::from("Unknown error creating program object")))
}
}
#[wasm_bindgen]
pub fn copy(data: &ImageData) -> Result<(), JsValue> {
unsafe {
// TODO use mutex
if !PIXEL_DATA_UPDATED && !PIXEL_DATA_UPDATING {
PIXEL_DATA_UPDATING = true;
for i in 0..BUFFER_SIZE {
PIXEL_DATA[i] = data.data()[i];
}
PIXEL_DATA_UPDATING = false;
PIXEL_DATA_UPDATED = true;
}
}
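// Sketch of a lock-free variant (illustrative only; assumes the usual
// single-threaded wasm event loop): atomics would remove the `unsafe`
// around these bookkeeping flags.
//
//     use std::sync::atomic::{AtomicBool, Ordering};
//     static UPDATED: AtomicBool = AtomicBool::new(false);
//     if !UPDATED.load(Ordering::Relaxed) {
//         // ... copy the pixels ...
//         UPDATED.store(true, Ordering::Relaxed);
//     }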
Ok(())
}
| {
Ok(shader)
} | conditional_block |
lib.rs | mod utils;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{ImageData, WebGlProgram, WebGlRenderingContext, WebGlShader};
const WIDTH: i32 = 128;
const HEIGHT: i32 = 128;
const CHANNELS: i32 = 4;
const BUFFER_SIZE: usize = ((WIDTH * HEIGHT) * CHANNELS) as usize;
static mut PIXEL_DATA: [u8; BUFFER_SIZE] = [255; BUFFER_SIZE];
static mut PIXEL_DATA_UPDATING: bool = false;
static mut PIXEL_DATA_UPDATED: bool = false;
// static mut CLIENT_READY: bool = false;
const VERTICES: [f32; 18] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref()) | pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
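// With these v coordinates the first row of PIXEL_DATA maps to the top of
// the quad, so no UNPACK_FLIP_Y_WEBGL step is needed.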
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: &str,
) -> Result<WebGlShader, String> {
let shader = context
.create_shader(shader_type)
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.shader_source(&shader, source);
context.compile_shader(&shader);
if context
.get_shader_parameter(&shader, WebGlRenderingContext::COMPILE_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(shader)
} else {
Err(context
.get_shader_info_log(&shader)
.unwrap_or_else(|| String::from("Unknown error creating shader")))
}
}
pub fn link_program(
context: &WebGlRenderingContext,
vert_shader: &WebGlShader,
frag_shader: &WebGlShader,
) -> Result<WebGlProgram, String> {
let program = context
.create_program()
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.attach_shader(&program, vert_shader);
context.attach_shader(&program, frag_shader);
context.link_program(&program);
if context
.get_program_parameter(&program, WebGlRenderingContext::LINK_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(program)
} else {
Err(context
.get_program_info_log(&program)
.unwrap_or_else(|| String::from("Unknown error creating program object")))
}
}
#[wasm_bindgen]
pub fn copy(data: &ImageData) -> Result<(), JsValue> {
unsafe {
// TODO use mutex
if !PIXEL_DATA_UPDATED && !PIXEL_DATA_UPDATING {
PIXEL_DATA_UPDATING = true;
for i in 0..BUFFER_SIZE {
PIXEL_DATA[i] = data.data()[i];
}
PIXEL_DATA_UPDATING = false;
PIXEL_DATA_UPDATED = true;
}
}
Ok(())
} | .expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)] | random_line_split |
lib.rs | mod utils;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{ImageData, WebGlProgram, WebGlRenderingContext, WebGlShader};
const WIDTH: i32 = 128;
const HEIGHT: i32 = 128;
const CHANNELS: i32 = 4;
const BUFFER_SIZE: usize = ((WIDTH * HEIGHT) * CHANNELS) as usize;
static mut PIXEL_DATA: [u8; BUFFER_SIZE] = [255; BUFFER_SIZE];
static mut PIXEL_DATA_UPDATING: bool = false;
static mut PIXEL_DATA_UPDATED: bool = false;
// static mut CLIENT_READY: bool = false;
const VERTICES: [f32; 18] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
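// Point the `image` sampler at texture unit 0, the default active unit the
// texture above was bound to.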
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn | (
context: &WebGlRenderingContext,
shader_type: u32,
source: &str,
) -> Result<WebGlShader, String> {
let shader = context
.create_shader(shader_type)
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.shader_source(&shader, source);
context.compile_shader(&shader);
if context
.get_shader_parameter(&shader, WebGlRenderingContext::COMPILE_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(shader)
} else {
Err(context
.get_shader_info_log(&shader)
.unwrap_or_else(|| String::from("Unknown error creating shader")))
}
}
pub fn link_program(
context: &WebGlRenderingContext,
vert_shader: &WebGlShader,
frag_shader: &WebGlShader,
) -> Result<WebGlProgram, String> {
let program = context
.create_program()
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.attach_shader(&program, vert_shader);
context.attach_shader(&program, frag_shader);
context.link_program(&program);
if context
.get_program_parameter(&program, WebGlRenderingContext::LINK_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(program)
} else {
Err(context
.get_program_info_log(&program)
.unwrap_or_else(|| String::from("Unknown error creating program object")))
}
}
#[wasm_bindgen]
pub fn copy(data: &ImageData) -> Result<(), JsValue> {
unsafe {
// TODO use mutex
if !PIXEL_DATA_UPDATED && !PIXEL_DATA_UPDATING {
PIXEL_DATA_UPDATING = true;
for i in 0..BUFFER_SIZE {
PIXEL_DATA[i] = data.data()[i];
}
PIXEL_DATA_UPDATING = false;
PIXEL_DATA_UPDATED = true;
}
}
Ok(())
}
| compile_shader | identifier_name |
lib.rs | mod utils;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{ImageData, WebGlProgram, WebGlRenderingContext, WebGlShader};
const WIDTH: i32 = 128;
const HEIGHT: i32 = 128;
const CHANNELS: i32 = 4;
const BUFFER_SIZE: usize = ((WIDTH * HEIGHT) * CHANNELS) as usize;
static mut PIXEL_DATA: [u8; BUFFER_SIZE] = [255; BUFFER_SIZE];
static mut PIXEL_DATA_UPDATING: bool = false;
static mut PIXEL_DATA_UPDATED: bool = false;
// static mut CLIENT_READY: bool = false;
const VERTICES: [f32; 18] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) |
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourselves
// (i.e., do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
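// Until the first copy() call replaces PIXEL_DATA, this initial draw shows
// the all-255 (opaque white) placeholder texture.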
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: &str,
) -> Result<WebGlShader, String> {
let shader = context
.create_shader(shader_type)
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.shader_source(&shader, source);
context.compile_shader(&shader);
if context
.get_shader_parameter(&shader, WebGlRenderingContext::COMPILE_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(shader)
} else {
Err(context
.get_shader_info_log(&shader)
.unwrap_or_else(|| String::from("Unknown error creating shader")))
}
}
pub fn link_program(
context: &WebGlRenderingContext,
vert_shader: &WebGlShader,
frag_shader: &WebGlShader,
) -> Result<WebGlProgram, String> {
let program = context
.create_program()
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.attach_shader(&program, vert_shader);
context.attach_shader(&program, frag_shader);
context.link_program(&program);
if context
.get_program_parameter(&program, WebGlRenderingContext::LINK_STATUS)
.as_bool()
.unwrap_or(false)
{
Ok(program)
} else {
Err(context
.get_program_info_log(&program)
.unwrap_or_else(|| String::from("Unknown error creating program object")))
}
}
#[wasm_bindgen]
pub fn copy(data: &ImageData) -> Result<(), JsValue> {
unsafe {
// TODO use mutex
if !PIXEL_DATA_UPDATED && !PIXEL_DATA_UPDATING {
PIXEL_DATA_UPDATING = true;
for i in 0..BUFFER_SIZE {
PIXEL_DATA[i] = data.data()[i];
}
PIXEL_DATA_UPDATING = false;
PIXEL_DATA_UPDATED = true;
}
}
Ok(())
}
| {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
} | identifier_body |
async_stream_cdc.rs | //
// Copyright (c) 2023 Nathan Fiedler
//
use super::*;
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::{
io::{AsyncRead, AsyncReadExt},
stream::Stream,
};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::Stream;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio::io::{AsyncRead, AsyncReadExt};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use async_stream::try_stream;
///
/// An async-streamable version of the FastCDC chunker implementation from 2020,
/// reading its input from an async source.
///
/// Use `new` to construct an instance, and then `as_stream` to produce an async
/// [Stream] of the chunks.
///
/// Both `futures` and `tokio`-based [AsyncRead] inputs are supported via
/// feature flags. If necessary, you can also use the
/// [`async_compat`](https://docs.rs/async-compat/latest/async_compat/) crate
/// to adapt your inputs.
///
/// Note that this struct allocates a `Vec<u8>` of `max_size` bytes to act as a
/// buffer when reading from the source and finding chunk boundaries.
///
/// ```no_run
/// # use std::fs::File;
/// # use fastcdc::v2020::AsyncStreamCDC;
/// # #[cfg(all(feature = "futures", not(feature = "tokio")))]
/// # use futures::stream::StreamExt;
/// # #[cfg(all(feature = "tokio", not(feature = "futures")))]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64,
/// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize,
max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct a `StreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `StreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
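// Example: avg_size = 16384 gives bits = 14; with Level1 normalization this
// selects MASKS[15] before the average point and MASKS[13] after it, biasing
// cut points toward avg_size (see test_masks below).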
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else {
self.length += bytes_read;
all_bytes_read += bytes_read;
}
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
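// stream::unfold threads `&mut self` through each step; mapping Error::Empty
// to None ends the stream instead of surfacing the end-of-input sentinel.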
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn | () {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[23]);
}
struct ExpectedChunk {
hash: u64,
offset: u64,
length: usize,
digest: String,
}
use md5::{Digest, Md5};
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::stream::StreamExt;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::StreamExt;
#[cfg_attr(all(feature = "tokio", not(feature = "futures")), tokio::test)]
#[cfg_attr(all(feature = "futures", not(feature = "tokio")), futures_test::test)]
async fn test_iter_sekien_16k_chunks() {
let read_result = std::fs::read("test/fixtures/SekienAkashita.jpg");
assert!(read_result.is_ok());
let contents = read_result.unwrap();
// The digest values are not needed here, but they serve to validate
// that the streaming version tested below is returning the correct
// chunk data on each iteration.
let expected_chunks = vec![
ExpectedChunk {
hash: 17968276318003433923,
offset: 0,
length: 21325,
digest: "2bb52734718194617c957f5e07ee6054".into(),
},
ExpectedChunk {
hash: 8197189939299398838,
offset: 21325,
length: 17140,
digest: "badfb0757fe081c20336902e7131f768".into(),
},
ExpectedChunk {
hash: 13019990849178155730,
offset: 38465,
length: 28084,
digest: "18412d7414de6eb42f638351711f729d".into(),
},
ExpectedChunk {
hash: 4509236223063678303,
offset: 66549,
length: 18217,
digest: "04fe1405fc5f960363bfcd834c056407".into(),
},
ExpectedChunk {
hash: 2504464741100432583,
offset: 84766,
length: 24700,
digest: "1aa7ad95f274d6ba34a983946ebc5af3".into(),
},
];
let mut chunker = AsyncStreamCDC::new(contents.as_ref(), 4096, 16384, 65535);
let stream = chunker.as_stream();
let chunks = stream.collect::<Vec<_>>().await;
let mut index = 0;
for chunk in chunks {
let chunk = chunk.unwrap();
assert_eq!(chunk.hash, expected_chunks[index].hash);
assert_eq!(chunk.offset, expected_chunks[index].offset);
assert_eq!(chunk.length, expected_chunks[index].length);
let mut hasher = Md5::new();
hasher.update(&contents[(chunk.offset as usize)..(chunk.offset as usize) + chunk.length]);
let digest = format!("{:x}", hasher.finalize());
assert_eq!(digest, expected_chunks[index].digest);
index += 1;
}
assert_eq!(index, 5);
}
}
| test_average_too_low | identifier_name |
async_stream_cdc.rs | //
// Copyright (c) 2023 Nathan Fiedler
//
use super::*;
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::{
io::{AsyncRead, AsyncReadExt},
stream::Stream,
};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::Stream;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio::io::{AsyncRead, AsyncReadExt};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use async_stream::try_stream;
///
/// An async-streamable version of the FastCDC chunker implementation from 2020,
/// reading its input from an async source.
///
/// Use `new` to construct an instance, and then `as_stream` to produce an async
/// [Stream] of the chunks.
///
/// Both `futures` and `tokio`-based [AsyncRead] inputs are supported via
/// feature flags. If necessary, you can also use the
/// [`async_compat`](https://docs.rs/async-compat/latest/async_compat/) crate
/// to adapt your inputs.
///
/// Note that this struct allocates a `Vec<u8>` of `max_size` bytes to act as a
/// buffer when reading from the source and finding chunk boundaries.
///
/// ```no_run
/// # use std::fs::File;
/// # use fastcdc::v2020::AsyncStreamCDC;
/// # #[cfg(all(feature = "futures", not(feature = "tokio")))]
/// # use futures::stream::StreamExt;
/// # #[cfg(all(feature = "tokio", not(feature = "futures")))]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64,
/// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize,
max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct a `StreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `StreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
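// These bounds mirror the crate limits (roughly 64 B-64 MiB for min,
// 256 B-256 MiB for avg, 1 KiB-1 GiB for max); the #[should_panic] tests
// below probe values just outside each edge.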
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else |
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn test_average_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[23]);
}
struct ExpectedChunk {
hash: u64,
offset: u64,
length: usize,
digest: String,
}
use md5::{Digest, Md5};
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::stream::StreamExt;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::StreamExt;
#[cfg_attr(all(feature = "tokio", not(feature = "futures")), tokio::test)]
#[cfg_attr(all(feature = "futures", not(feature = "tokio")), futures_test::test)]
async fn test_iter_sekien_16k_chunks() {
let read_result = std::fs::read("test/fixtures/SekienAkashita.jpg");
assert!(read_result.is_ok());
let contents = read_result.unwrap();
// The digest values are not needed here, but they serve to validate
// that the streaming version tested below is returning the correct
// chunk data on each iteration.
let expected_chunks = vec![
ExpectedChunk {
hash: 17968276318003433923,
offset: 0,
length: 21325,
digest: "2bb52734718194617c957f5e07ee6054".into(),
},
ExpectedChunk {
hash: 8197189939299398838,
offset: 21325,
length: 17140,
digest: "badfb0757fe081c20336902e7131f768".into(),
},
ExpectedChunk {
hash: 13019990849178155730,
offset: 38465,
length: 28084,
digest: "18412d7414de6eb42f638351711f729d".into(),
},
ExpectedChunk {
hash: 4509236223063678303,
offset: 66549,
length: 18217,
digest: "04fe1405fc5f960363bfcd834c056407".into(),
},
ExpectedChunk {
hash: 2504464741100432583,
offset: 84766,
length: 24700,
digest: "1aa7ad95f274d6ba34a983946ebc5af3".into(),
},
];
let mut chunker = AsyncStreamCDC::new(contents.as_ref(), 4096, 16384, 65535);
let stream = chunker.as_stream();
let chunks = stream.collect::<Vec<_>>().await;
let mut index = 0;
for chunk in chunks {
let chunk = chunk.unwrap();
assert_eq!(chunk.hash, expected_chunks[index].hash);
assert_eq!(chunk.offset, expected_chunks[index].offset);
assert_eq!(chunk.length, expected_chunks[index].length);
let mut hasher = Md5::new();
hasher.update(&contents[(chunk.offset as usize)..(chunk.offset as usize) + chunk.length]);
let digest = format!("{:x}", hasher.finalize());
assert_eq!(digest, expected_chunks[index].digest);
index += 1;
}
assert_eq!(index, 5);
}
}
| {
self.length += bytes_read;
all_bytes_read += bytes_read;
} | conditional_block |
async_stream_cdc.rs | //
// Copyright (c) 2023 Nathan Fiedler
//
use super::*;
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::{
io::{AsyncRead, AsyncReadExt},
stream::Stream,
};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::Stream;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio::io::{AsyncRead, AsyncReadExt};
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use async_stream::try_stream;
///
/// An async-streamable version of the FastCDC chunker implementation from 2020,
/// reading its input from an async source.
///
/// Use `new` to construct an instance, and then `as_stream` to produce an async
/// [Stream] of the chunks.
///
/// Both `futures` and `tokio`-based [AsyncRead] inputs are supported via
/// feature flags. If necessary, you can also use the
/// [`async_compat`](https://docs.rs/async-compat/latest/async_compat/) crate
/// to adapt your inputs.
///
/// Note that this struct allocates a `Vec<u8>` of `max_size` bytes to act as a
/// buffer when reading from the source and finding chunk boundaries.
///
/// ```no_run
/// # use std::fs::File;
/// # use fastcdc::v2020::AsyncStreamCDC;
/// # #[cfg(all(feature = "futures", not(feature = "tokio")))]
/// # use futures::stream::StreamExt;
/// # #[cfg(all(feature = "tokio", not(feature = "futures")))]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64, | max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct a `StreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `StreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
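// AsyncRead::read may return fewer bytes than requested, so keep reading
// until the buffer is full or a zero-byte read signals end of input.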
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else {
self.length += bytes_read;
all_bytes_read += bytes_read;
}
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn test_average_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[23]);
}
struct ExpectedChunk {
hash: u64,
offset: u64,
length: usize,
digest: String,
}
use md5::{Digest, Md5};
#[cfg(all(feature = "futures", not(feature = "tokio")))]
use futures::stream::StreamExt;
#[cfg(all(feature = "tokio", not(feature = "futures")))]
use tokio_stream::StreamExt;
#[cfg_attr(all(feature = "tokio", not(feature = "futures")), tokio::test)]
#[cfg_attr(all(feature = "futures", not(feature = "tokio")), futures_test::test)]
async fn test_iter_sekien_16k_chunks() {
let read_result = std::fs::read("test/fixtures/SekienAkashita.jpg");
assert!(read_result.is_ok());
let contents = read_result.unwrap();
// The digest values are not needed here, but they serve to validate
// that the streaming version tested below is returning the correct
// chunk data on each iteration.
let expected_chunks = vec![
ExpectedChunk {
hash: 17968276318003433923,
offset: 0,
length: 21325,
digest: "2bb52734718194617c957f5e07ee6054".into(),
},
ExpectedChunk {
hash: 8197189939299398838,
offset: 21325,
length: 17140,
digest: "badfb0757fe081c20336902e7131f768".into(),
},
ExpectedChunk {
hash: 13019990849178155730,
offset: 38465,
length: 28084,
digest: "18412d7414de6eb42f638351711f729d".into(),
},
ExpectedChunk {
hash: 4509236223063678303,
offset: 66549,
length: 18217,
digest: "04fe1405fc5f960363bfcd834c056407".into(),
},
ExpectedChunk {
hash: 2504464741100432583,
offset: 84766,
length: 24700,
digest: "1aa7ad95f274d6ba34a983946ebc5af3".into(),
},
];
let mut chunker = AsyncStreamCDC::new(contents.as_ref(), 4096, 16384, 65535);
let stream = chunker.as_stream();
let chunks = stream.collect::<Vec<_>>().await;
let mut index = 0;
for chunk in chunks {
let chunk = chunk.unwrap();
assert_eq!(chunk.hash, expected_chunks[index].hash);
assert_eq!(chunk.offset, expected_chunks[index].offset);
assert_eq!(chunk.length, expected_chunks[index].length);
let mut hasher = Md5::new();
hasher
.update(&contents[(chunk.offset as usize)..(chunk.offset as usize) + chunk.length]);
let table = hasher.finalize();
let digest = format!("{:x}", table);
assert_eq!(digest, expected_chunks[index].digest);
index += 1;
}
assert_eq!(index, 5);
}
} | /// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize, | random_line_split |
impl_encryption.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use openssl::hash::{self, MessageDigest};
use tidb_query_codegen::rpn_fn;
use tidb_query_datatype::expr::{Error, EvalContext};
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_shared_expr::rand::{gen_random_bytes, MAX_RAND_BYTES_LENGTH};
const SHA0: i64 = 0;
const SHA224: i64 = 224;
const SHA256: i64 = 256;
const SHA384: i64 = 384;
const SHA512: i64 = 512;
#[rpn_fn(nullable)]
#[inline]
pub fn md5(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::md5(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable)]
#[inline]
pub fn sha1(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::sha1(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn sha2(
ctx: &mut EvalContext,
input: Option<BytesRef>,
hash_length: Option<&Int>,
) -> Result<Option<Bytes>> {
match (input, hash_length) {
(Some(input), Some(hash_length)) => {
let sha2 = match *hash_length {
SHA0 | SHA256 => MessageDigest::sha256(),
SHA224 => MessageDigest::sha224(),
SHA384 => MessageDigest::sha384(),
SHA512 => MessageDigest::sha512(),
_ => {
ctx.warnings
.append_warning(Error::incorrect_parameters("sha2"));
return Ok(None);
}
};
hex_digest(sha2, input).map(Some)
}
_ => Ok(None),
}
}
// https://dev.mysql.com/doc/refman/5.7/en/password-hashing.html
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn password(ctx: &mut EvalContext, input: Option<BytesRef>) -> Result<Option<Bytes>> {
ctx.warnings.append_warning(Error::Other(box_err!(
"Warning: Deprecated syntax PASSWORD"
)));
match input {
Some(bytes) => {
if bytes.is_empty() {
Ok(Some(Vec::new()))
} else {
let hash1 = hex_digest(MessageDigest::sha1(), bytes)?;
let mut hash2 = hex_digest(MessageDigest::sha1(), hash1.as_slice())?;
hash2.insert(0, b'*');
Ok(Some(hash2))
}
}
None => Ok(None),
}
}
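// As implemented above, the deprecated PASSWORD() value is '*' followed by
// hex(SHA1(hex(SHA1(input)))); note the second SHA1 runs over the ASCII hex of
// the first digest. E.g. "TiKV" maps to "*cca644408381f962dba8dfb9889db1371ee74208"
// per the test vectors below.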
#[inline]
fn hex_digest(hashtype: MessageDigest, input: &[u8]) -> Result<Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
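// Example, consistent with the tests below: COMPRESS()-style payloads store the
// uncompressed length in the first four little-endian bytes, so input bytes
// beginning 0B 00 00 00 report a length of 11.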
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![
(vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"),
(b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
("pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7b9919740d956849eedcb0f0f90bf8a34e8c1f4e071e3773f53bd6f8f16c04425ff728bed04de1b63db51"),
("pingcap", 512, "ea903c574370774c4844a83b7122105a106e04211673810e1baae7c2ae7aba2cf07465e02f6c413126111ef74a417232683ce7ba210052e63c15fc82204aad80"),
("13572468", 0, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468", 224, "8ad67735bbf49576219f364f4640d595357a440358d15bf6815a16e4"),
("13572468", 256, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468.123", 384, "3b4ee302435dc1e15251efd9f3982b1ca6fe4ac778d3260b7bbf3bea613849677eda830239420e448e4c6dc7c2649d89"),
("13572468.123", 512, "4820aa3f2760836557dc1f2d44a0ba7596333fdb60c8a1909481862f4ab0921c00abb23d57b7e67a970363cc3fcb78b25b6a0d45cdcac0e87aa0c96bc51f7f96"),
];
for (input_str, hash_length_i64, exp_str) in cases {
let exp = Some(Bytes::from(exp_str));
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input_str)))
.push_param(Some(Int::from(hash_length_i64)))
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap();
assert_eq!(got, exp, "sha2('{:?}', {:?})", input_str, hash_length_i64);
}
let null_cases = vec![
(ScalarValue::Bytes(None), ScalarValue::Int(Some(1))),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(None),
),
(ScalarValue::Bytes(None), ScalarValue::Int(None)),
(
ScalarValue::Bytes(Some(b"pingcap".to_vec())),
ScalarValue::Int(Some(-1)),
),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(Some(999)),
),
];
for (input_str, hash_length_i64) in null_cases {
assert!(RpnFnScalarEvaluator::new()
.push_param(input_str)
.push_param(hash_length_i64)
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap()
.is_none())
}
}
#[test]
fn test_random_bytes() {
let cases = vec![1, 32, 233, 1024];
for len in cases {
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Int::from(len as i64)))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap();
assert_eq!(got.unwrap().len(), len);
}
let overflow_tests = vec![
ScalarValue::Int(Some(-32)),
ScalarValue::Int(Some(1025)),
ScalarValue::Int(Some(0)),
];
for len in overflow_tests {
assert!(RpnFnScalarEvaluator::new()
.push_param(len)
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.is_err());
}
//test NULL case
assert!(RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Int(None))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap()
.is_none())
}
#[test]
fn test_password() {
let cases = vec![
("Ti | assert_eq!(None, res)
}
}
| KV", "*cca644408381f962dba8dfb9889db1371ee74208"),
("Pingcap", "*f33bc75eac70ac317621fbbfa560d6251c43cf8a"),
("rust", "*090c2b08e0c1776910e777b917c2185be6554c2e"),
("database", "*02e86b4af5219d0ba6c974908aea62d42eb7da24"),
("raft", "*b23a77787ed44e62ef2570f03ce8982d119fb699"),
];
for (input, output) in cases {
let res = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input)))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(res, Some(Bytes::from(output)))
}
// test for null
let res = RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Bytes(None))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap(); | identifier_body |
impl_encryption.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use openssl::hash::{self, MessageDigest};
use tidb_query_codegen::rpn_fn;
use tidb_query_datatype::expr::{Error, EvalContext};
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_shared_expr::rand::{gen_random_bytes, MAX_RAND_BYTES_LENGTH};
const SHA0: i64 = 0;
const SHA224: i64 = 224;
const SHA256: i64 = 256;
const SHA384: i64 = 384;
const SHA512: i64 = 512;
#[rpn_fn(nullable)]
#[inline]
pub fn md5(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::md5(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable)]
#[inline]
pub fn sha1(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::sha1(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn sha2(
ctx: &mut EvalContext,
input: Option<BytesRef>,
hash_length: Option<&Int>,
) -> Result<Option<Bytes>> {
match (input, hash_length) {
(Some(input), Some(hash_length)) => {
let sha2 = match *hash_length {
SHA0 | SHA256 => MessageDigest::sha256(),
SHA224 => MessageDigest::sha224(),
SHA384 => MessageDigest::sha384(),
SHA512 => MessageDigest::sha512(),
_ => {
ctx.warnings
.append_warning(Error::incorrect_parameters("sha2"));
return Ok(None);
}
};
hex_digest(sha2, input).map(Some)
}
_ => Ok(None),
}
}
// https://dev.mysql.com/doc/refman/5.7/en/password-hashing.html
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn password(ctx: &mut EvalContext, input: Option<BytesRef>) -> Result<Option<Bytes>> {
ctx.warnings.append_warning(Error::Other(box_err!(
"Warning: Deprecated syntax PASSWORD"
)));
match input {
Some(bytes) => {
if bytes.is_empty() {
Ok(Some(Vec::new()))
} else {
let hash1 = hex_digest(MessageDigest::sha1(), bytes)?;
let mut hash2 = hex_digest(MessageDigest::sha1(), hash1.as_slice())?;
hash2.insert(0, b'*');
Ok(Some(hash2))
}
}
None => Ok(None),
}
}
#[inline]
fn hex_digest(hashtype: MessageDigest, input: &[u8]) -> Result<Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![ | (b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
("pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7b9919740d956849eedcb0f0f90bf8a34e8c1f4e071e3773f53bd6f8f16c04425ff728bed04de1b63db51"),
("pingcap", 512, "ea903c574370774c4844a83b7122105a106e04211673810e1baae7c2ae7aba2cf07465e02f6c413126111ef74a417232683ce7ba210052e63c15fc82204aad80"),
("13572468", 0, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468", 224, "8ad67735bbf49576219f364f4640d595357a440358d15bf6815a16e4"),
("13572468", 256, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468.123", 384, "3b4ee302435dc1e15251efd9f3982b1ca6fe4ac778d3260b7bbf3bea613849677eda830239420e448e4c6dc7c2649d89"),
("13572468.123", 512, "4820aa3f2760836557dc1f2d44a0ba7596333fdb60c8a1909481862f4ab0921c00abb23d57b7e67a970363cc3fcb78b25b6a0d45cdcac0e87aa0c96bc51f7f96"),
];
for (input_str, hash_length_i64, exp_str) in cases {
let exp = Some(Bytes::from(exp_str));
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input_str)))
.push_param(Some(Int::from(hash_length_i64)))
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap();
assert_eq!(got, exp, "sha2('{:?}', {:?})", input_str, hash_length_i64);
}
let null_cases = vec![
(ScalarValue::Bytes(None), ScalarValue::Int(Some(1))),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(None),
),
(ScalarValue::Bytes(None), ScalarValue::Int(None)),
(
ScalarValue::Bytes(Some(b"pingcap".to_vec())),
ScalarValue::Int(Some(-1)),
),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(Some(999)),
),
];
for (input_str, hash_length_i64) in null_cases {
assert!(RpnFnScalarEvaluator::new()
.push_param(input_str)
.push_param(hash_length_i64)
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap()
.is_none())
}
}
#[test]
fn test_random_bytes() {
let cases = vec![1, 32, 233, 1024];
for len in cases {
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Int::from(len as i64)))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap();
assert_eq!(got.unwrap().len(), len);
}
let overflow_tests = vec![
ScalarValue::Int(Some(-32)),
ScalarValue::Int(Some(1025)),
ScalarValue::Int(Some(0)),
];
for len in overflow_tests {
assert!(RpnFnScalarEvaluator::new()
.push_param(len)
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.is_err());
}
//test NULL case
assert!(RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Int(None))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap()
.is_none())
}
#[test]
fn test_password() {
let cases = vec![
("TiKV", "*cca644408381f962dba8dfb9889db1371ee74208"),
("Pingcap", "*f33bc75eac70ac317621fbbfa560d6251c43cf8a"),
("rust", "*090c2b08e0c1776910e777b917c2185be6554c2e"),
("database", "*02e86b4af5219d0ba6c974908aea62d42eb7da24"),
("raft", "*b23a77787ed44e62ef2570f03ce8982d119fb699"),
];
for (input, output) in cases {
let res = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input)))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(res, Some(Bytes::from(output)))
}
// test for null
let res = RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Bytes(None))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(None, res)
}
} | (vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"), | random_line_split |
impl_encryption.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use openssl::hash::{self, MessageDigest};
use tidb_query_codegen::rpn_fn;
use tidb_query_datatype::expr::{Error, EvalContext};
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_shared_expr::rand::{gen_random_bytes, MAX_RAND_BYTES_LENGTH};
const SHA0: i64 = 0;
const SHA224: i64 = 224;
const SHA256: i64 = 256;
const SHA384: i64 = 384;
const SHA512: i64 = 512;
#[rpn_fn(nullable)]
#[inline]
pub fn md5(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::md5(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable)]
#[inline]
pub fn sha1(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::sha1(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn sha2(
ctx: &mut EvalContext,
input: Option<BytesRef>,
hash_length: Option<&Int>,
) -> Result<Option<Bytes>> {
match (input, hash_length) {
(Some(input), Some(hash_length)) => {
let sha2 = match *hash_length {
SHA0 | SHA256 => MessageDigest::sha256(),
SHA224 => MessageDigest::sha224(),
SHA384 => MessageDigest::sha384(),
SHA512 => MessageDigest::sha512(),
_ => {
ctx.warnings
.append_warning(Error::incorrect_parameters("sha2"));
return Ok(None);
}
};
hex_digest(sha2, input).map(Some)
}
_ => Ok(None),
}
}
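// Dispatch summary for the match above: 0 and 256 select SHA-256; 224, 384 and
// 512 select their namesakes; any other length appends the "sha2"
// incorrect-parameters warning and returns NULL (see null_cases in the tests).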
// https://dev.mysql.com/doc/refman/5.7/en/password-hashing.html
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn password(ctx: &mut EvalContext, input: Option<BytesRef>) -> Result<Option<Bytes>> {
ctx.warnings.append_warning(Error::Other(box_err!(
"Warning: Deprecated syntax PASSWORD"
)));
match input {
Some(bytes) => {
if bytes.is_empty() {
Ok(Some(Vec::new()))
} else {
let hash1 = hex_digest(MessageDigest::sha1(), bytes)?;
let mut hash2 = hex_digest(MessageDigest::sha1(), hash1.as_slice())?;
hash2.insert(0, b'*');
Ok(Some(hash2))
}
}
None => Ok(None),
}
}
#[inline]
fn hex_digest(hashtype: MessageDigest, input: &[u8]) -> Result<Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
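// Bounds note: only lengths in 1..=MAX_RAND_BYTES_LENGTH are accepted; the
// overflow tests below show 1024 succeeding and 0, -32 and 1025 failing, so the
// limit is evidently 1024.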
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![
(vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"),
(b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
| "pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7b9919740d956849eedcb0f0f90bf8a34e8c1f4e071e3773f53bd6f8f16c04425ff728bed04de1b63db51"),
("pingcap", 512, "ea903c574370774c4844a83b7122105a106e04211673810e1baae7c2ae7aba2cf07465e02f6c413126111ef74a417232683ce7ba210052e63c15fc82204aad80"),
("13572468", 0, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468", 224, "8ad67735bbf49576219f364f4640d595357a440358d15bf6815a16e4"),
("13572468", 256, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468.123", 384, "3b4ee302435dc1e15251efd9f3982b1ca6fe4ac778d3260b7bbf3bea613849677eda830239420e448e4c6dc7c2649d89"),
("13572468.123", 512, "4820aa3f2760836557dc1f2d44a0ba7596333fdb60c8a1909481862f4ab0921c00abb23d57b7e67a970363cc3fcb78b25b6a0d45cdcac0e87aa0c96bc51f7f96"),
];
for (input_str, hash_length_i64, exp_str) in cases {
let exp = Some(Bytes::from(exp_str));
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input_str)))
.push_param(Some(Int::from(hash_length_i64)))
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap();
assert_eq!(got, exp, "sha2('{:?}', {:?})", input_str, hash_length_i64);
}
let null_cases = vec![
(ScalarValue::Bytes(None), ScalarValue::Int(Some(1))),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(None),
),
(ScalarValue::Bytes(None), ScalarValue::Int(None)),
(
ScalarValue::Bytes(Some(b"pingcap".to_vec())),
ScalarValue::Int(Some(-1)),
),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(Some(999)),
),
];
for (input_str, hash_length_i64) in null_cases {
assert!(RpnFnScalarEvaluator::new()
.push_param(input_str)
.push_param(hash_length_i64)
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap()
.is_none())
}
}
#[test]
fn test_random_bytes() {
let cases = vec![1, 32, 233, 1024];
for len in cases {
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Int::from(len as i64)))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap();
assert_eq!(got.unwrap().len(), len);
}
let overflow_tests = vec![
ScalarValue::Int(Some(-32)),
ScalarValue::Int(Some(1025)),
ScalarValue::Int(Some(0)),
];
for len in overflow_tests {
assert!(RpnFnScalarEvaluator::new()
.push_param(len)
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.is_err());
}
//test NULL case
assert!(RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Int(None))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap()
.is_none())
}
#[test]
fn test_password() {
let cases = vec![
("TiKV", "*cca644408381f962dba8dfb9889db1371ee74208"),
("Pingcap", "*f33bc75eac70ac317621fbbfa560d6251c43cf8a"),
("rust", "*090c2b08e0c1776910e777b917c2185be6554c2e"),
("database", "*02e86b4af5219d0ba6c974908aea62d42eb7da24"),
("raft", "*b23a77787ed44e62ef2570f03ce8982d119fb699"),
];
for (input, output) in cases {
let res = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input)))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(res, Some(Bytes::from(output)))
}
// test for null
let res = RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Bytes(None))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(None, res)
}
}
| ( | identifier_name |
d.rs | */
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
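// In grid terms: from the given square this walks each direction in DIRECTIONS
// (presumably the four cardinal rays) until the first '#' building or the grid
// border, collecting every square passed; soldiers and turrets do not block.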
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original solider & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if st_prime.is_some() {
let &(s, t) = st_prime.unwrap();
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().filter(|&(u, _v)| u < S).next().unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
| cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
//plus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
// present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
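// Concrete 4-cycle example: with M = {s1->t1, s2->t2} and
// C = (s1->t2, t2->s2, s2->t1, t1->s1), both M edges have their reverses in C
// and are dropped, while the C edges s1->t2 and s2->t1 (whose reverses are not
// in M) are kept, giving M' = {s1->t2, s2->t1}.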
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || (!seen_turret && is_g_prime) {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index is already using the original grids index
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
if visited[new_loc_index] {
continue;
}
visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) {
return Err(err);
}
}
if let | while !visited[edge.0] {
visited.set(edge.0, true); | random_line_split |
d.rs |
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
| break;
}
}
}
r
}
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original solider & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
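// Standard bipartite-matching reduction: with unit capacities on the
// source->soldier and turret->sink edges, the max flow R equals the maximum
// number of disjoint soldier/turret pairs.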
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if st_prime.is_some() {
let &(s, t) = st_prime.unwrap();
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().filter(|&(u, _v)| u < S).next().unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
//plus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
// present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || (!seen_turret && is_g_prime) {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index is already using the original grids index
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
if visited[new_loc_index] {
continue;
}
visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) {
return Err(err);
}
}
if | {
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else { | identifier_body |
d.rs |
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original solider & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
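    //Unit-capacity edges source->soldier and turret->sink (added below) make
    //the max flow value equal the size of a maximum soldier-turret matching.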
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
        .filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
        let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
        if let Some(&(s, t)) = st_prime {
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
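        //A cycle is guaranteed at this point: every G' edge from a matched
        //soldier ends at a matched turret (otherwise (s,t') above would have
        //matched), and every matched turret has a back edge to its soldier,
        //so the walk below can always continue and must repeat a vertex.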
        let soldier_in_h = H.edges().find(|&(u, _v)| u < S).unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
        while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
        //Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
        //plus the edges in C whose reverse is not in M. That is, M' is M with the edges that
        //appear in C (in either direction) exchanged. M' is also a matching of G of the same
        //size as M: because C is a cycle, every edge removed from M is replaced by one from G'.
let mut M_new: Vec<(usize, usize)> = Vec::new();
        M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
        M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
    turret_reachable_squares_list: &[HashSet<GridRowColVec>],
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
        while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
                if !is_g_prime || (!seen_turret && is_g_prime) {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
                    //The turret index already uses the original grid's index
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
                    let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
                    if visited[new_loc_index] {
                        continue;
                    }
                    visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) |
}
| {
return Err(err);
} | conditional_block |
d.rs |
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn | <'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
    //original soldier & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
    //precalculate which squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
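    //This assumes the flow graph stores edges in (e, e ^ 1) pairs with
    //endp[e] the head of edge e, so endp[e ^ 1] recovers its tail.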
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
        .filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
        let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
        if let Some(&(s, t)) = st_prime {
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
        let soldier_in_h = H.edges().find(|&(u, _v)| u < S).unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
        while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
        //Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
        //plus the edges in C whose reverse is not in M. That is, M' is M with the edges that
        //appear in C (in either direction) exchanged. M' is also a matching of G of the same
        //size as M: because C is a cycle, every edge removed from M is replaced by one from G'.
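        //Example: with M = {s1->t1, s2->t2} and cycle s1->t2->s2->t1->s1,
        //both M edges appear reversed in C and drop out, while C contributes
        //s1->t2 and s2->t1, so M' = {s1->t2, s2->t1} has the same size.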
let mut M_new: Vec<(usize, usize)> = Vec::new();
        M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
        M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
    turret_reachable_squares_list: &[HashSet<GridRowColVec>],
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
        while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
                if !is_g_prime || (!seen_turret && is_g_prime) {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
                    //The turret index already uses the original grid's index
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
                    let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
                    if visited[new_loc_index] {
                        continue;
                    }
                    visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) {
return Err(err);
}
}
| solve | identifier_name |
simple_http.rs | self.addr,
username.as_str(),
password.as_str(),
)?
} else {
Socks5Stream::connect(self.proxy_addr, self.addr)?
};
Ok(stream.into_inner())
}
#[cfg(not(feature = "proxy"))]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?;
stream.set_read_timeout(Some(self.timeout))?;
stream.set_write_timeout(Some(self.timeout))?;
Ok(stream)
}
fn try_request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
// No part of this codebase should panic, so unwrapping a mutex lock is fine
let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex");
if sock_lock.is_none() {
*sock_lock = Some(BufReader::new(self.fresh_socket()?));
};
// In the immediately preceding block, we made sure that `sock` is non-`None`,
// so unwrapping here is fine.
let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap();
// Serialize the body first so we can set the Content-Length header.
let body = serde_json::to_vec(&req)?;
let mut request_bytes = Vec::new();
request_bytes.write_all(b"POST ")?;
request_bytes.write_all(self.path.as_bytes())?;
request_bytes.write_all(b" HTTP/1.1\r\n")?;
// Write headers
request_bytes.write_all(b"host: ")?;
request_bytes.write_all(self.addr.to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(b"Content-Type: application/json\r\n")?;
request_bytes.write_all(b"Content-Length: ")?;
request_bytes.write_all(body.len().to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
if let Some(ref auth) = self.basic_auth {
request_bytes.write_all(b"Authorization: ")?;
request_bytes.write_all(auth.as_ref())?;
request_bytes.write_all(b"\r\n")?;
}
// Write body
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(&body)?;
// Send HTTP request
let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok()
&& sock.get_mut().flush().is_ok();
// This indicates the socket is broken so let's retry the send once with a fresh socket
        if !write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
}
// Parse first HTTP response header line
let mut header_buf = String::new();
let read_success = sock.read_line(&mut header_buf).is_ok();
// This is another possible indication that the socket is broken so let's retry the send once
// with a fresh socket IF the write attempt has not already experienced a failure
if (!read_success || header_buf.is_empty()) && write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
sock.read_line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
        if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
        if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
// Read up to `content_length` bytes. Note that if there is no content-length
// header, we will assume an effectively infinite content length, i.e. we will
// just keep reading from the socket until it is closed.
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
                if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
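///
/// For example, "http://user:pass@localhost/wallet" (an illustrative URL)
/// yields the socket address of localhost on port 80 together with the path
/// "/wallet".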
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for simple bitcoind [`SimpleHttpTransport`].
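///
/// A minimal usage sketch; the URL and credentials are placeholders, and the
/// example assumes this module is reachable as `jsonrpc::simple_http`:
///
/// ```no_run
/// # fn main() -> Result<(), jsonrpc::simple_http::Error> {
/// let tp = jsonrpc::simple_http::Builder::new()
///     .url("http://localhost:8332")?
///     .auth("user", Some("pass"))
///     .build();
/// # Ok(())
/// # }
/// ```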
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
        self.tp.proxy_auth =
            Some((user.as_ref().to_string(), pass.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn build(self) -> SimpleHttpTransport {
self.tp
}
}
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
    /// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
    /// The HTTP response was too short to even fit an HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
/// The HTTP response started with a HTTP/1.1 line which was not ASCII.
HttpResponseNonAsciiHello(Vec<u8>),
/// The HTTP response did not start with HTTP/1.1
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
    /// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> | | IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
| {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_) | identifier_body |
simple_http.rs | self.addr,
username.as_str(),
password.as_str(),
)?
} else {
Socks5Stream::connect(self.proxy_addr, self.addr)?
};
Ok(stream.into_inner())
}
#[cfg(not(feature = "proxy"))]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?;
stream.set_read_timeout(Some(self.timeout))?;
stream.set_write_timeout(Some(self.timeout))?;
Ok(stream)
}
fn try_request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
// No part of this codebase should panic, so unwrapping a mutex lock is fine
let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex");
if sock_lock.is_none() {
*sock_lock = Some(BufReader::new(self.fresh_socket()?));
};
// In the immediately preceding block, we made sure that `sock` is non-`None`,
// so unwrapping here is fine.
let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap();
// Serialize the body first so we can set the Content-Length header.
let body = serde_json::to_vec(&req)?;
let mut request_bytes = Vec::new();
request_bytes.write_all(b"POST ")?;
request_bytes.write_all(self.path.as_bytes())?;
request_bytes.write_all(b" HTTP/1.1\r\n")?;
// Write headers
request_bytes.write_all(b"host: ")?;
request_bytes.write_all(self.addr.to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(b"Content-Type: application/json\r\n")?;
request_bytes.write_all(b"Content-Length: ")?;
request_bytes.write_all(body.len().to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
if let Some(ref auth) = self.basic_auth {
request_bytes.write_all(b"Authorization: ")?;
request_bytes.write_all(auth.as_ref())?;
request_bytes.write_all(b"\r\n")?;
}
// Write body
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(&body)?;
// Send HTTP request
let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok()
&& sock.get_mut().flush().is_ok();
// This indicates the socket is broken so let's retry the send once with a fresh socket
        if !write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
}
// Parse first HTTP response header line
let mut header_buf = String::new();
let read_success = sock.read_line(&mut header_buf).is_ok();
// This is another possible indication that the socket is broken so let's retry the send once
// with a fresh socket IF the write attempt has not already experienced a failure
if (!read_success || header_buf.is_empty()) && write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
sock.read_line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
        if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
        if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
// Read up to `content_length` bytes. Note that if there is no content-length
// header, we will assume an effectively infinite content length, i.e. we will
// just keep reading from the socket until it is closed.
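        // Capping reads at FINAL_RESP_ALLOC bounds memory use when the server
        // omits the content length; oversized declared lengths are rejected
        // below.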
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
                if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for simple bitcoind [`SimpleHttpTransport`].
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
        self.tp.proxy_auth =
            Some((user.as_ref().to_string(), pass.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn | (self) -> SimpleHttpTransport {
self.tp
}
}
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
    /// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
/// The HTTP response was too short to even fit a HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
/// The HTTP response started with a HTTP/1.1 line which was not ASCII.
HttpResponseNonAsciiHello(Vec<u8>),
/// The HTTP response did not start with HTTP/1.1
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
    /// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_)
| IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
| build | identifier_name |
simple_http.rs | line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
        if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
        if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
// Read up to `content_length` bytes. Note that if there is no content-length
// header, we will assume an effectively infinite content length, i.e. we will
// just keep reading from the socket until it is closed.
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
                if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
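    // Only the first address the resolver yields is used (addr.next() below).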
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for simple bitcoind [`SimpleHttpTransport`].
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
        self.tp.proxy_auth =
            Some((user.as_ref().to_string(), pass.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn build(self) -> SimpleHttpTransport {
self.tp
}
}
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
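    ///
    /// Sketch of a typical call (URL and credentials are placeholders):
    /// `Client::simple_http("http://localhost:8332", Some("user".into()), Some("pass".into()))`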
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
    /// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
/// The HTTP response was too short to even fit a HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
/// The HTTP response started with a HTTP/1.1 line which was not ASCII.
HttpResponseNonAsciiHello(Vec<u8>),
/// The HTTP response did not start with HTTP/1.1
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
    /// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_)
| IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
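// The From impls above are what let transport internals use `?` and have
// failures funneled into the crate-level error. Minimal hedged illustration
// (editor addition; the helper name is hypothetical):
//
// fn read_some(mut sock: impl io::Read) -> Result<Vec<u8>, Error> {
//     let mut buf = vec![0u8; 16];
//     let n = sock.read(&mut buf)?; // io::Error -> Error::SocketError via From
//     buf.truncate(n);
//     Ok(buf)
// }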
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
io::sink().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl TcpStream {
pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<Self> {
Ok(TcpStream)
}
pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
}
}
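// With the stubs above, a fuzz target can replay arbitrary bytes as the HTTP
// response by loading the global cursor before issuing a request. Hedged
// sketch (editor addition; the harness shape is an assumption):
//
// #[cfg(jsonrpc_fuzz)]
// fn inject_fuzz_input(data: &[u8]) {
//     *FUZZ_TCP_SOCK.lock().unwrap() = Some(io::Cursor::new(data.to_vec()));
//     // Subsequent connect_timeout() + read() calls will now replay `data`.
// }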
#[cfg(test)]
mod tests {
use std::net;
#[cfg(feature = "proxy")]
use std::str::FromStr;
use super::*;
use crate::Client;
#[test]
fn test_urls() {
let addr: net::SocketAddr = ("localhost", 22).to_socket_addrs().unwrap().next().unwrap();
let urls = [
"localhost:22",
"http://localhost:22/",
"https://localhost:22/walletname/stuff?it=working",
"http://me:weak@localhost:22/wallet",
];
for u in &urls {
let tp = Builder::new().url(u).unwrap().build();
assert_eq!(tp.addr, addr);
}
// Default port, and the 80 and 443 fill-ins.
let addr: net::SocketAddr = ("localhost", 80).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("http://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr = ("localhost", 443).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("https://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr =
("localhost", super::DEFAULT_PORT).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("localhost").unwrap().build();
assert_eq!(tp.addr, addr);
let valid_urls = [
"localhost",
"127.0.0.1:8080",
"http://127.0.0.1:8080/",
"http://127.0.0.1:8080/rpc/test",
"https://127.0.0.1/rpc/test",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8300",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]",
];
for u in &valid_urls {
let (addr, path) = check_url(u).unwrap();
let builder = Builder::new().url(u).unwrap_or_else(|_| panic!("error for: {}", u));
assert_eq!(builder.tp.addr, addr);
assert_eq!(builder.tp.path, path);
assert_eq!(builder.tp.timeout, DEFAULT_TIMEOUT);
assert_eq!(builder.tp.basic_auth, None);
#[cfg(feature = "proxy")]
assert_eq!(builder.tp.proxy_addr, SocketAddr::from_str("127.0.0.1:9050").unwrap());
}
let invalid_urls = [
"127.0.0.1.0:8080",
"httpx://127.0.0.1:8080/",
"ftp://127.0.0.1:8080/rpc/test",
"http://127.0.0./rpc/test", | random_line_split |
||
ser.rs | }
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {
// Drop layers in reverse order.
while !self.stack.is_empty() {
self.stack.pop();
}
}
}
#[allow(nonstandard_style)]
struct write_u64 {
major: u8,
v: u64,
}
impl write_u64 {
fn into(self, out: &'_ mut (dyn io::Write)) -> io::Result<()> {
let Self { major, v: value } = self;
let mask = major << 5;
macro_rules! with_uNs {( $($uN:ident)<* ) => ({
mod c {
$(
pub mod $uN { pub const MAX: u64 = ::core::$uN::MAX as _; }
)*
pub mod u8 { pub const MAX: u64 = ::core::u8::MAX as _; }
}
const SMALL_U8_MAX: u64 = 0x17;
#[allow(nonstandard_style)]
enum MaskFor {
u8 = (SMALL_U8_MAX + 1) as _,
$($uN),*
}
match value {
0..= SMALL_U8_MAX => out.write_all(&[mask | (value as u8)]),
0..= c::u8::MAX => out.write_all(&[
mask | (MaskFor::u8 as u8),
value as u8,
]),
$(
0..= c::$uN::MAX => {
let value = value as $uN;
let ref mut buf = [0; 1 + ::core::mem::size_of::<$uN>()];
buf[0] = mask | (MaskFor::$uN as u8);
buf[1..].copy_from_slice(&value.to_be_bytes());
out.write_all(buf)
},
)*
_ => unreachable!(),
}
})}
with_uNs!(u16 < u32 < u64)
}
}
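// Worked examples for the header encoding above (editor addition): the major
// type occupies the top three bits and the argument uses the smallest width
// that fits, so:
//
// write_u64 { major: 0, v: 10 }   -> [0x0a]              (5-bit immediate)
// write_u64 { major: 0, v: 24 }   -> [0x18, 0x18]        (1-byte argument)
// write_u64 { major: 0, v: 1000 } -> [0x19, 0x03, 0xe8]  (2-byte argument)
// write_u64 { major: 3, v: 6 }    -> [0x66]              (text string, len 6)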
/// Serialize any serializable type as a CBOR byte sequence into a
/// [`Write`][io::Write]able sink.
///
/// Returns:
/// - `Ok(())` on success.
/// - `Err(Some(io_error))` on I/O failure.
/// - `Err(None)` on serialization error (unrepresentable integer).
pub fn to_writer<'value>(
out: &'_ mut dyn io::Write,
value: &'value dyn Serialize,
) -> Result<(), Option<io::Error>> {
// Borrow-checker-friendly "closure"
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! write { ($bytes:expr) => ({
out.write_all($bytes).map_err(Some)
})}
// Use a manual stack to avoid (stack-allocated) recursion.
let mut stack: Vec<Layer<'value>> = vec![Layer::Single(value)];
// where:
enum Layer<'value> {
Seq(Box<dyn Seq<'value> + 'value>),
Map(Box<dyn Map<'value> + 'value>),
Single(&'value dyn Serialize),
}
while let Some(last) = stack.last_mut() {
let view: ValueView<'value> = match last {
&mut Layer::Single(value) => {
let view = value.view();
drop(stack.pop());
view
}
Layer::Seq(seq) => {
match seq.next() {
Some(value) => stack.push(Layer::Single(value)),
None => drop(stack.pop()),
}
continue;
}
Layer::Map(map) => {
match map.next() {
Some((key, value)) => {
stack.push(Layer::Single(value));
stack.push(Layer::Single(key));
}
None => drop(stack.pop()),
}
continue;
}
};
match view {
ValueView::Null => write!(&[0xf6])?,
ValueView::Bool(b) => write!(&[0xf4 | (b as u8)])?,
ValueView::Str(s) => {
write_u64 {
major: 3,
v: s.len() as u64,
}
.into(out)?;
write!(s.as_bytes())?;
}
ValueView::Bytes(bs) => {
write_u64 {
major: 2,
v: bs.len() as u64,
}
.into(out)?;
write!(&*bs)?;
}
ValueView::Int(i) => {
const MIN: i128 = -(1_i128 << 64);
const MAX: i128 = ::core::u64::MAX as _;
match i {
MIN..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
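// Hedged usage sketch (editor addition): serializing into an in-memory sink;
// `Vec<u8>` implements io::Write, and the finite-float arm above picks the
// narrowest lossless width (e.g. 42.5f64 round-trips through f16 and encodes
// as [0xf9, 0x51, 0x50], matching the test_half expectation seen later in
// this file).
//
// fn to_vec_example(value: &dyn Serialize) -> Result<Vec<u8>, Option<io::Error>> {
//     let mut out = Vec::new();
//     to_writer(&mut out, value)?;
//     Ok(out)
// }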
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24). | enum Layer<'a> {
Seq(Box<dyn Seq<'a> + 'a>),
Map(Box<dyn Map<'a> + 'a>), | random_line_split |
|
ser.rs | (self, out: &'_ mut (dyn io::Write)) -> io::Result<()> {
let Self { major, v: value } = self;
let mask = major << 5;
macro_rules! with_uNs {( $($uN:ident)<* ) => ({
mod c {
$(
pub mod $uN { pub const MAX: u64 = ::core::$uN::MAX as _; }
)*
pub mod u8 { pub const MAX: u64 = ::core::u8::MAX as _; }
}
const SMALL_U8_MAX: u64 = 0x17;
#[allow(nonstandard_style)]
enum MaskFor {
u8 = (SMALL_U8_MAX + 1) as _,
$($uN),*
}
match value {
0..= SMALL_U8_MAX => out.write_all(&[mask | (value as u8)]),
0..= c::u8::MAX => out.write_all(&[
mask | (MaskFor::u8 as u8),
value as u8,
]),
$(
0..= c::$uN::MAX => {
let value = value as $uN;
let ref mut buf = [0; 1 + ::core::mem::size_of::<$uN>()];
buf[0] = mask | (MaskFor::$uN as u8);
buf[1..].copy_from_slice(&value.to_be_bytes());
out.write_all(buf)
},
)*
_ => unreachable!(),
}
})}
with_uNs!(u16 < u32 < u64)
}
}
/// Serialize any serializable type as a CBOR byte sequence into a
/// [`Write`][io::Write]able sink.
///
/// Returns:
/// - `Ok(())` on success.
/// - `Err(Some(io_error))` on I/O failure.
/// - `Err(None)` on serialization error (unrepresentable integer).
pub fn to_writer<'value>(
out: &'_ mut dyn io::Write,
value: &'value dyn Serialize,
) -> Result<(), Option<io::Error>> {
// Borrow-checker-friendly "closure"
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! write { ($bytes:expr) => ({
out.write_all($bytes).map_err(Some)
})}
// Use a manual stack to avoid (stack-allocated) recursion.
let mut stack: Vec<Layer<'value>> = vec![Layer::Single(value)];
// where:
enum Layer<'value> {
Seq(Box<dyn Seq<'value> + 'value>),
Map(Box<dyn Map<'value> + 'value>),
Single(&'value dyn Serialize),
}
while let Some(last) = stack.last_mut() {
let view: ValueView<'value> = match last {
&mut Layer::Single(value) => {
let view = value.view();
drop(stack.pop());
view
}
Layer::Seq(seq) => {
match seq.next() {
Some(value) => stack.push(Layer::Single(value)),
None => drop(stack.pop()),
}
continue;
}
Layer::Map(map) => {
match map.next() {
Some((key, value)) => {
stack.push(Layer::Single(value));
stack.push(Layer::Single(key));
}
None => drop(stack.pop()),
}
continue;
}
};
match view {
ValueView::Null => write!(&[0xf6])?,
ValueView::Bool(b) => write!(&[0xf4 | (b as u8)])?,
ValueView::Str(s) => {
write_u64 {
major: 3,
v: s.len() as u64,
}
.into(out)?;
write!(s.as_bytes())?;
}
ValueView::Bytes(bs) => {
write_u64 {
major: 2,
v: bs.len() as u64,
}
.into(out)?;
write!(&*bs)?;
}
ValueView::Int(i) => {
const MIN: i128 = -(1_i128 << 64);
const MAX: i128 = ::core::u64::MAX as _;
match i {
MIN..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24).unwrap();
assert_eq_hex!(vec, b"\x18\x18");
// i8
let vec = to_vec(&-5).unwrap();
assert_eq_hex!(vec, b"\x24");
// i16
let vec = to_vec(&-300).unwrap();
assert_eq_hex!(vec, b"\x39\x01\x2b");
// i32
| into | identifier_name |
|
ser.rs | ..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24).unwrap();
assert_eq_hex!(vec, b"\x18\x18");
// i8
let vec = to_vec(&-5).unwrap();
assert_eq_hex!(vec, b"\x24");
// i16
let vec = to_vec(&-300).unwrap();
assert_eq_hex!(vec, b"\x39\x01\x2b");
// i32
let vec = to_vec(&-23567997).unwrap();
assert_eq_hex!(vec, b"\x3a\x01\x67\x9e\x7c");
// u64
let vec = to_vec(&::std::u64::MAX).unwrap();
assert_eq_hex!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
// #[test]
// fn test_self_describing() {
// let mut vec = Vec::new();
// {
// let mut serializer = ser::Serializer::new(&mut vec);
// serializer.self_describe().unwrap();
// serializer.serialize_u64(9).unwrap();
// }
// assert_eq_hex!(vec, b"\xd9\xd9\xf7\x09");
// }
// #[test]
// fn test_ip_addr() {
// use ::std::net::Ipv4Addr;
// let addr = Ipv4Addr::new(8, 8, 8, 8);
// let vec = to_vec(&addr).unwrap();
// println!("{:?}", vec);
// assert_eq_hex!(vec.len(), 5);
// let test_addr: Ipv4Addr = from_slice(&vec).unwrap();
// assert_eq_hex!(addr, test_addr);
// }
/// Test all of CBOR's fixed-length byte string types
#[test]
fn test_byte_string() {
// Very short byte strings have 1-byte headers
let short = vec![0_u8, 1, 2, 255];
let short_s = to_vec(&short).unwrap();
assert_eq_hex!(&short_s[..], [0x44, 0, 1, 2, 255]);
// byte strings > 23 bytes have 2-byte headers
let medium = vec![
0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
255,
];
let medium_s = to_vec(&medium).unwrap();
assert_eq_hex!(
&medium_s[..],
[
0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 255
]
);
// byte strings ≥ 256 bytes have 3-byte headers
let long_vec = (0..256).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let long_s = to_vec(&long_vec).unwrap();
assert_eq_hex!(&long_s[0..3], [0x59, 1, 0]);
assert_eq_hex!(&long_s[3..], &long_vec[..]);
// byte strings ≥ 2^16 bytes have 5-byte headers
let very_long_vec = (0..65536).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let very_long_s = to_vec(&very_long_vec).unwrap();
assert_eq_hex!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]);
assert_eq_hex!(&very_long_s[5..], &very_long_vec[..]);
// byte strings ≥ 2^32 bytes have 9-byte headers,
// but they take too much RAM to test in most CI setups, such as Travis.
// Confident on our implementation of the serialization code (which
// only copies the byte slice contents provided the writer allows it),
// we `unsafe`-ly fake a gigantic slice by using a writer
// that will saturate right after the header has been written.
#[cfg(all(not(miri), target_pointer_width = "64"))] #[cfg_attr(rustfmt, rustfmt::skip)]
unsafe {
let fake_huge_byte_seq: &'_ [u8] = ::core::slice::from_raw_parts(
0x1 as _,
0x00_00_00_01_de_ad_be_ef,
);
let mut _9 = [0_u8; 9];
let _ = to_writer(&mut &mut _9[..], &fake_huge_byte_seq);
assert_eq_hex!(
&_9[..],
[
0x5b,
0x00, 0x00, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef,
],
);
}
}
#[test]
fn test_half() {
| let vec = to_vec(&42.5f32).unwrap();
assert_eq_hex!(vec, b"\xF9\x51\x50");
assert_eq!(from_slice::<f32>(&vec[..]).unwrap(), 42.5f32);
}
}
} | identifier_body |
|
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path, from given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template, where '{}' is replaced by the command
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 | else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
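// Hedged illustration of the expansion above (editor addition; the command is
// an arbitrary example): with shell = "bash -c '{}'" and command =
// "notify-send flavours", full_command becomes `bash -c 'notify-send flavours'`,
// which shell_words splits into ["bash", "-c", "notify-send flavours"] before
// spawning with stdout/stderr silenced.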
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
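// Hedged before/after illustration (editor addition), using the default
// delimiters from `apply` below. Matching is trim()ed and lowercased, and
// both delimiter lines survive because the start line is written before the
// template and the end line right after it:
//
//   # Start flavours          # Start flavours
//   <old generated block> =>  <built template>
//   # End flavours            # End flavours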
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// If shell is present, check if it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy.into_iter());
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
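// Hedged sketch of one config entry consumed by the loop below (editor
// addition; field names mirror the struct accesses in this function, the
// values are illustrative):
//
// [[items]]
// template = "alacritty"
// subtemplate = "default"
// file = "~/.config/alacritty/colors.yml"
// rewrite = true
// light = true
// hook = "touch ~/.config/alacritty/alacritty.yml"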
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} | conditional_block |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path, from given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template, where '{}' is replaced by the command
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn | (
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// If shell is present, check if it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy.into_iter());
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| replace_delimiter | identifier_name |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path, from given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template, where '{}' is replaced by the command
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
); | //Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// If shell is present, check if it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy.into_iter());
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
} | println!();
}
| random_line_split |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path, from given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template, where '{}' is replaced by the command
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> | .with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
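// Minimal sketch (added) of how `replace_delimiter` behaves with the default
// delimiters. Note the caller trims and lowercases the delimiters before passing them in.
#[cfg(test)]
mod replace_delimiter_tests {
    use super::replace_delimiter;

    #[test]
    fn replaces_lines_between_delimiters() {
        let content = "before\n# Start flavours\nold\n# End flavours\nafter";
        let result =
            replace_delimiter(content, "# start flavours", "# end flavours", "new\n").unwrap();
        // The delimiter lines themselves are kept; only the lines between them change.
        assert_eq!(result, "before\n# Start flavours\nnew\n# End flavours\nafter\n");
    }
}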
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// If shell is present, check if it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy.into_iter());
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(|| format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status() | identifier_body |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this function
/// otherwise does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
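// Worked example (added for illustration): two compasses, one cog and one tablet
// score 2^2 + 1^2 + 1^2 = 6 for the sets of identical symbols, plus 7 * 1 = 7 for
// one complete group of all three symbols, i.e. 13 points in total.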
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the user can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
// Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource
// "choice" cards). At the same time, make a vector of the resource choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
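// e.g. two choice cards offering 2 and 3 resources give 6 combinations;
// combination 5 decodes (mixed-radix) to option 5 % 2 = 1 on the first card
// and (5 / 2) % 3 = 2 on the second.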
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() |
#[test]
fn do_action_returns_false_if_action_not_playable() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
}
| {
// TODO implement
} | identifier_body |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this function
/// otherwise does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
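// Worked example (added for illustration): two compasses, one cog and one tablet
// score 2^2 + 1^2 + 1^2 = 6 for the sets of identical symbols, plus 7 * 1 = 7 for
// one complete group of all three symbols, i.e. 13 points in total.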
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the user can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
// Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource
// "choice" cards). At the same time, make a vector of the resource choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
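// e.g. two choice cards offering 2 and 3 resources give 6 combinations;
// combination 5 decodes (mixed-radix) to option 5 % 2 = 1 on the first card
// and (5 / 2) % 3 = 2 on the second.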
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() {
// TODO implement
}
#[test]
fn | () {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
}
| do_action_returns_false_if_action_not_playable | identifier_name |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this function
/// otherwise does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
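// Worked example (added for illustration): two compasses, one cog and one tablet
// score 2^2 + 1^2 + 1^2 = 6 for the sets of identical symbols, plus 7 * 1 = 7 for
// one complete group of all three symbols, i.e. 13 points in total.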
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the user can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
| // "choice" cards. At the same time, make a vector of resources choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
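// e.g. two choice cards offering 2 and 3 resources give 6 combinations;
// combination 5 decodes (mixed-radix) to option 5 % 2 = 1 on the first card
// and (5 / 2) % 3 = 2 on the second.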
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() {
// TODO implement
}
#[test]
fn do_action_returns_false_if_action_not_playable() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
} | // Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource | random_line_split |
runtime.rs | use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use std::future::Future;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
cfg_unstable! {
use crate::runtime::scheduler::MultiThreadAlt;
}
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the `#[tokio::main]` annotation on their
/// entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
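///
/// A minimal sharing sketch (illustrative; not from the original docs):
///
/// ```
/// use std::sync::Arc;
/// use tokio::runtime::Runtime;
///
/// let rt = Arc::new(Runtime::new().unwrap());
/// let rt2 = Arc::clone(&rt);
/// std::thread::spawn(move || {
///     // The clone keeps the runtime alive and can spawn onto it.
///     let _ = rt2.spawn(async {});
/// }).join().unwrap();
/// ```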
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
/// The flavor that executes tasks across multiple threads.
#[cfg(tokio_unstable)]
MultiThreadAlt,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThread(MultiThread),
/// Execute tasks across multiple threads.
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThreadAlt(MultiThreadAlt),
}
impl Runtime {
pub(super) fn from_parts(
scheduler: Scheduler,
handle: Handle,
blocking_pool: BlockingPool,
) -> Runtime {
Runtime {
scheduler,
handle,
blocking_pool,
}
}
cfg_not_wasi! {
/// Creates a new runtime instance with default configuration values.
///
/// This results in the multi threaded scheduler, I/O driver, and time driver being
/// initialized.
///
/// Most applications will not need to call this function directly. Instead,
/// they will use the [`#[tokio::main]` attribute][main]. When a more complex
/// configuration is necessary, the [runtime builder] may be used.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
/// ``` | /// [runtime builder]: crate::runtime::Builder
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new() -> std::io::Result<Runtime> {
Builder::new_multi_thread().enable_all().build()
}
}
/// Returns a handle to the runtime's spawner.
///
/// The returned handle can be used to spawn tasks that run on this runtime, and can
/// be cloned to allow moving the `Handle` to other threads.
///
/// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
/// Refer to the documentation of [`Handle::block_on`] for more.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let handle = rt.handle();
///
/// // Use the handle...
/// ```
pub fn handle(&self) -> &Handle {
&self.handle
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
self.handle.spawn(future)
}
/// Runs the provided function on an executor dedicated to blocking operations.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a blocking function onto the runtime
/// rt.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.handle.spawn_blocking(func)
}
/// Runs a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
/// # Non-worker future
///
/// Note that the future required by this function does not run as a
/// worker. The expectation is that other tasks are spawned by the future here.
/// Awaiting on other futures from the future provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi thread scheduler
///
/// When the multi thread scheduler is used this will allow futures
/// to run within the io driver and timer context of the overall runtime.
///
/// Any spawned tasks will continue running after `block_on` returns.
///
/// # Current thread scheduler
///
/// When the current thread scheduler is enabled `block_on`
/// can be called concurrently from multiple threads. The first call
/// will take ownership of the io and timer drivers. This means
/// other threads which do not own the drivers will hook into that one.
/// When the first `block_on` completes, other threads will be able to
/// "steal" the driver to allow continued execution of their futures.
///
/// Any spawned tasks will be suspended after `block_on` returns. Calling
/// `block_on` again will resume previously spawned tasks.
///
/// # Panics
///
/// This function panics if the provided future panics, or if called within an
/// asynchronous execution context.
///
/// # Examples
///
/// ```no_run
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Execute the future, blocking the current thread until completion
/// rt.block_on(async {
/// println!("hello");
/// });
/// ```
///
/// [handle]: fn@Handle::block_on
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
#[cfg(all(
tokio_unstable,
tokio_taskdump,
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(
future,
"block_on",
None,
crate::runtime::task::Id::next().as_u64(),
);
let _enter = self.enter();
match &self.scheduler {
Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future),
}
}
/// Enters the runtime context.
///
/// This allows you to construct types that must have an executor
/// available on creation such as [`Sleep`] or [`TcpStream`]. It will
/// also allow you to call methods such as [`tokio::spawn`].
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
///
/// # Example
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn function_that_spawns(msg: String) {
/// // Had we not used `rt.enter` below, this would panic.
/// tokio::spawn(async move {
/// println!("{}", msg);
/// });
/// }
///
/// fn main() {
/// let rt = Runtime::new().unwrap();
///
/// let s = "Hello World!".to_string();
///
/// // By entering the context, we tie `tokio::spawn` to this executor.
/// let _guard = rt.enter();
/// function_that_spawns(s);
/// }
/// ```
pub fn enter(&self) -> EnterGuard<'_> {
self.handle.enter()
}
/// Shuts down the runtime, waiting for at most `duration` for all spawned
/// work to stop.
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::task;
///
/// use std::thread;
/// use std::time::Duration;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// task::spawn_blocking(move || {
/// thread::sleep(Duration::from_secs(10_000));
/// });
/// });
///
/// runtime.shutdown_timeout(Duration::from_millis(100));
/// }
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wakeup and shutdown all the worker threads
self.handle.inner.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
/// Shuts down the runtime, without waiting for any spawned work to stop.
///
/// This can be useful if you want to drop a runtime from within another runtime.
/// Normally, dropping a runtime will block indefinitely for spawned blocking tasks
/// to complete, which would normally not be permitted within an asynchronous context.
/// By calling `shutdown_background()`, you can drop the runtime from such a context.
///
/// Note however, that because we do not wait for any blocking tasks to complete, this
/// may result in a resource leak (in that any blocking tasks are still running until they
/// return).
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// let inner_runtime = Runtime::new().unwrap();
/// //...
/// inner_runtime.shutdown_background();
/// });
/// }
/// ```
pub fn shutdown_background(self) {
self.shutdown_timeout(Duration::from_nanos(0))
}
}
#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let
impl Drop for Runtime {
fn drop(&mut self) {
match &mut self.scheduler {
Scheduler::CurrentThread(current_thread) => {
// This ensures that tasks spawned on the current-thread
// runtime are dropped inside the runtime's context.
let _guard = context::try_set_current(&self.handle.inner);
current_thread.shutdown(&self.handle.inner);
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
}
}
}
cfg_metrics! {
impl Runtime {
/// Returns a view that lists metrics about the runtime.
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
self.handle.metrics()
}
}
} | ///
/// [mod]: index.html
/// [main]: ../attr.main.html
/// [threaded scheduler]: index.html#threaded-scheduler | random_line_split |
runtime.rs | use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use std::future::Future;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
cfg_unstable! {
use crate::runtime::scheduler::MultiThreadAlt;
}
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the `#[tokio::main]` annotation on their
/// entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
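///
/// For instance, a minimal sketch of sharing through a cloned [`Handle`]
/// (illustrative only, not part of the original docs):
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// let handle = rt.handle().clone();
///
/// std::thread::spawn(move || {
///     // The clone refers to the same runtime and can spawn onto it.
///     handle.spawn(async {
///         println!("spawned through a shared handle");
///     });
/// })
/// .join()
/// .unwrap();
/// ```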
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
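///
/// A small illustrative sketch (assuming the `rt` feature set used elsewhere in
/// these docs):
///
/// ```
/// use tokio::runtime::{Builder, RuntimeFlavor};
///
/// let rt = Builder::new_current_thread().build().unwrap();
/// assert_eq!(rt.handle().runtime_flavor(), RuntimeFlavor::CurrentThread);
/// ```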
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
/// The flavor that executes tasks across multiple threads.
#[cfg(tokio_unstable)]
MultiThreadAlt,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThread(MultiThread),
/// Execute tasks across multiple threads.
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThreadAlt(MultiThreadAlt),
}
impl Runtime {
pub(super) fn from_parts(
scheduler: Scheduler,
handle: Handle,
blocking_pool: BlockingPool,
) -> Runtime {
Runtime {
scheduler,
handle,
blocking_pool,
}
}
cfg_not_wasi! {
/// Creates a new runtime instance with default configuration values.
///
/// This results in the multi threaded scheduler, I/O driver, and time driver being
/// initialized.
///
/// Most applications will not need to call this function directly. Instead,
/// they will use the [`#[tokio::main]` attribute][main]. When a more complex
/// configuration is necessary, the [runtime builder] may be used.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
/// ```
///
/// [mod]: index.html
/// [main]: ../attr.main.html
/// [threaded scheduler]: index.html#threaded-scheduler
/// [runtime builder]: crate::runtime::Builder
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new() -> std::io::Result<Runtime> {
Builder::new_multi_thread().enable_all().build()
}
}
/// Returns a handle to the runtime's spawner.
///
/// The returned handle can be used to spawn tasks that run on this runtime, and can
/// be cloned to allow moving the `Handle` to other threads.
///
/// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
/// Refer to the documentation of [`Handle::block_on`] for more.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let handle = rt.handle();
///
/// // Use the handle...
/// ```
pub fn handle(&self) -> &Handle {
&self.handle
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
self.handle.spawn(future)
}
/// Runs the provided function on an executor dedicated to blocking operations.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a blocking function onto the runtime
/// rt.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.handle.spawn_blocking(func)
}
/// Runs a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
/// # Non-worker future
///
/// Note that the future required by this function does not run as a
/// worker. The expectation is that other tasks are spawned by the future here.
/// Awaiting on other futures from the future provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi thread scheduler
///
/// When the multi thread scheduler is used this will allow futures
/// to run within the io driver and timer context of the overall runtime.
///
/// Any spawned tasks will continue running after `block_on` returns.
///
/// # Current thread scheduler
///
/// When the current thread scheduler is enabled `block_on`
/// can be called concurrently from multiple threads. The first call
/// will take ownership of the io and timer drivers. This means
/// other threads which do not own the drivers will hook into that one.
/// When the first `block_on` completes, other threads will be able to
/// "steal" the driver to allow continued execution of their futures.
///
/// Any spawned tasks will be suspended after `block_on` returns. Calling
/// `block_on` again will resume previously spawned tasks.
///
/// # Panics
///
/// This function panics if the provided future panics, or if called within an
/// asynchronous execution context.
///
/// # Examples
///
/// ```no_run
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Execute the future, blocking the current thread until completion
/// rt.block_on(async {
/// println!("hello");
/// });
/// ```
///
/// [handle]: fn@Handle::block_on
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
#[cfg(all(
tokio_unstable,
tokio_taskdump,
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(
future,
"block_on",
None,
crate::runtime::task::Id::next().as_u64(),
);
let _enter = self.enter();
match &self.scheduler {
Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future),
}
}
/// Enters the runtime context.
///
/// This allows you to construct types that must have an executor
/// available on creation such as [`Sleep`] or [`TcpStream`]. It will
/// also allow you to call methods such as [`tokio::spawn`].
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
///
/// # Example
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn function_that_spawns(msg: String) {
/// // Had we not used `rt.enter` below, this would panic.
/// tokio::spawn(async move {
/// println!("{}", msg);
/// });
/// }
///
/// fn main() {
/// let rt = Runtime::new().unwrap();
///
/// let s = "Hello World!".to_string();
///
/// // By entering the context, we tie `tokio::spawn` to this executor.
/// let _guard = rt.enter();
/// function_that_spawns(s);
/// }
/// ```
pub fn enter(&self) -> EnterGuard<'_> {
self.handle.enter()
}
/// Shuts down the runtime, waiting for at most `duration` for all spawned
/// work to stop.
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::task;
///
/// use std::thread;
/// use std::time::Duration;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// task::spawn_blocking(move || {
/// thread::sleep(Duration::from_secs(10_000));
/// });
/// });
///
/// runtime.shutdown_timeout(Duration::from_millis(100));
/// }
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wake up and shut down all the worker threads
self.handle.inner.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
/// Shuts down the runtime, without waiting for any spawned work to stop.
///
/// This can be useful if you want to drop a runtime from within another runtime.
/// Normally, dropping a runtime will block indefinitely for spawned blocking tasks
/// to complete, which would normally not be permitted within an asynchronous context.
/// By calling `shutdown_background()`, you can drop the runtime from such a context.
///
/// Note however, that because we do not wait for any blocking tasks to complete, this
/// may result in a resource leak (in that any blocking tasks are still running until they
/// return).
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// let inner_runtime = Runtime::new().unwrap();
/// //...
/// inner_runtime.shutdown_background();
/// });
/// }
/// ```
pub fn shutdown_background(self) {
self.shutdown_timeout(Duration::from_nanos(0))
}
}
#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let
impl Drop for Runtime {
fn drop(&mut self) {
match &mut self.scheduler {
Scheduler::CurrentThread(current_thread) => {
// This ensures that tasks spawned on the current-thread
// runtime are dropped inside the runtime's context.
let _guard = context::try_set_current(&self.handle.inner);
current_thread.shutdown(&self.handle.inner);
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
}
}
}
cfg_metrics! {
impl Runtime {
/// Returns a view that lets you get information about how the runtime is performing.
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
self.handle.metrics()
}
}
}
pool.rs

//! `LoggedPool` structure for logging raw task events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// get an id for a new task and increment global tasks counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// get an id for a new iterator and increment global iterators counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr ), +) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph below.
/// On the real file you can hover over tasks, and javascript lets you toggle the display of
/// the different tags, but this is disabled with rustdoc so I downgraded the file
/// for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler.
/// Note that it is **freaking slow**: a full second to set up the counter.
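///
/// A hedged sketch of counting CPU cycles around a computation (assumes the `perf`
/// feature is enabled and `HardwareEventType` is re-exported as described above):
///
/// ```ignore
/// use rayon_logs::{subgraph_hardware_event, HardwareEventType};
///
/// let sum = subgraph_hardware_event("sum_cycles", HardwareEventType::CPUCycles, || {
///     (0..1_000u64).sum::<u64>()
/// });
/// assert_eq!(sum, 499_500);
/// ```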
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
///
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function will be called just before running the graph and produce an S.
/// The end function will be called just after running the graph on this S and produce a usize
/// which will then be stored for display.
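///
/// For example, a minimal sketch (assuming the same crate-root re-export as
/// `subgraph`) that tags a subgraph with its wall-clock time in nanoseconds:
///
/// ```
/// use rayon_logs::custom_subgraph;
/// use std::time::Instant;
///
/// let sum = custom_subgraph(
///     "timed_sum",
///     Instant::now,
///     |start| start.elapsed().as_nanos() as usize,
///     || (0..1_000u32).sum::<u32>(),
/// );
/// assert_eq!(sum, 499_500);
/// ```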
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
}
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
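///
/// For example, a small sketch (assuming the crate-root re-export used elsewhere
/// in these docs; `migrated()` reports whether the closure runs on a different
/// thread than the caller):
///
/// ```
/// let (a, b) = rayon_logs::join_context(
///     |ctx| if ctx.migrated() { "a stolen" } else { "a local" },
///     |ctx| if ctx.migrated() { "b stolen" } else { "b local" },
/// );
/// println!("{}, {}", a, b);
/// ```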
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrarily selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter to increment file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // hold the lock while clearing all per-thread logs
for log in logs {
log.clear();
}
}
/// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
pub fn logging_install<OP, R>(&self, op: OP) -> (R, RunLog)
where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
/// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
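///
/// A minimal usage sketch (the numeric suffix of the file name comes from a global
/// counter, so it depends on how many `install` calls ran before):
///
/// ```
/// use rayon_logs::ThreadPoolBuilder;
///
/// let pool = ThreadPoolBuilder::new().build().expect("building pool failed");
/// let max = pool.install(|| (0..1_000u32).max().unwrap());
/// assert_eq!(max, 999);
/// // a `log_<counter>.json` trace file is saved in the working directory
/// ```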
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
///This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
}
pool.rs | //! `LoggedPool` structure for logging raw tasks events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// get an id for a new task and increment global tasks counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// get an id for a new iterator and increment global iterators counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr ), +) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph below.
/// On the real file you can hover but javascript and toggle the display of the different tags but
/// it is disabled with rustdoc so I downgraded the file
/// for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler.
/// note that It is **freaking slow**: 1 full second to set up the counter.
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
///
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function will be called just before running the graph and produce an S.
/// The end function will be called just after running the graph on this S and produce a usize
/// which will the be stored for display.
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) |
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]:../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrary selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter used to number the log file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // oh yeah baby
for log in logs {
log.clear();
}
}
/// Execute the given closure in the thread pool, logging its task as the initial one.
/// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
pub fn logging_install<OP, R>(&self, op: OP) -> (R, RunLog)
where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
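// Sketch of intended use (assuming a logging `ThreadPool` built through this
// crate's builder, and a hypothetical `expensive_computation` function):
//
// let (result, run_log) = pool.logging_install(|| expensive_computation());
// run_log.save("my_run.json").expect("saving log failed");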
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
/// Execute the given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
/// This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
}
| {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
} | identifier_body |
pool.rs | //! `LoggedPool` structure for logging raw task events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// Get an id for a new task and increment the global task counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// Get an id for a new iterator and increment the global iterator counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr),+) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
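// For instance (a sketch, usable from code where `RayonEvent` and `now` are in
// scope), both events below reach the thread-local storage through a single
// borrow instead of two separate `log` calls:
//
// logs!(
//     RayonEvent::TaskEnd(now()),
//     RayonEvent::TaskStart(next_task_id(), now())
// );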
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph above.
/// On the real file you can hover over tasks and use javascript to toggle the display of the
/// different tags, but javascript is disabled with rustdoc so I downgraded the file
/// for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and use the nightly version of the compiler.
/// Note that it is **freaking slow**: it takes 1 full second to set up the counter.
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
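// Sketch of use (assumes the `perf` feature, a nightly compiler, and a
// hypothetical `fib` function; the event type is imported from this crate as
// the doc above suggests):
//
// use rayon_logs::{subgraph_hardware_event, HardwareEventType};
// let r = subgraph_hardware_event("fib", HardwareEventType::CPUCycles, || fib(30));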
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and use the nightly version of the compiler.
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and use the nightly version of the compiler.
///
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function will be called just before running the graph and produce an S.
/// The end function will be called just after running the graph on this S and produce a usize
/// which will then be stored for display.
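///
/// Example (a sketch; assumes `custom_subgraph` is re-exported at the crate
/// root like the other functions here):
///
/// ```
/// use rayon_logs::custom_subgraph;
/// use std::time::Instant;
/// // tag the subgraph with its wall-clock duration in nanoseconds
/// let sum = custom_subgraph(
///     "timed_sum",
///     Instant::now,
///     |start| start.elapsed().as_nanos() as usize,
///     || (0..1_000u64).sum::<u64>(),
/// );
/// assert_eq!(sum, 499_500);
/// ```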
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly; use `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly; use `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
}
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
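// A minimal sketch of the context parameter (closure bodies are hypothetical):
// each closure can ask its `FnContext` whether it was migrated, i.e. stolen to
// another thread.
//
// let (a, b) = join_context(
//     |ctx| if ctx.migrated() { "stolen" } else { "local" },
//     |ctx| if ctx.migrated() { "stolen" } else { "local" },
// );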
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrarily selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter used to number the log file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // oh yeah baby
for log in logs {
log.clear();
}
}
/// Execute the given closure in the thread pool, logging its task as the initial one.
/// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
pub fn | <OP, R>(&self, op: OP) -> (R, RunLog)
where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
/// Execute the given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
/// This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
}
| logging_install | identifier_name |
main.rs | fn main() {
//Rust deals with stacks and heaps for memory management; there is no GC and no direct memory management
//The stack memory is a first-in, last-out queue
//Stack data must take up a known and fixed size
//In Rust the heap is used when we don't know the size of a value at compile time
//or if the memory to be allocated is dynamic
//Heap memory is not really organized; data just kinda gets thrown wherever the OS has space for it
//Therefore, the program has to jump around to get data, which can slow things down.
//Function local variables get pushed onto the stack and then popped off when
//the function is done
//Each value is assigned a variable which is its owner. Only one owner can exist at a time
//When the owner is out of scope the value disappears
//Examples to go over variable scope
let s = "hello";
{
let s = "hello2";
println!("s: {}", s);
}
println!("Previous s is out of scope but the one defined earlier isn't");
println!("s: {}", s);
//Onto the next example which goes over the rules of ownership
//It's going to be using the String type aka StringBuffers
let mut s = String::from("hello");
s.push_str(", world!");// s must be mutable for this to work
println!("{}", s);
//Note: In C++, this pattern of deallocating resources at the end of an item’s lifetime is sometimes
//called Resource Acquisition Is Initialization (RAII). The drop function in Rust will be familiar
//to you if you’ve used RAII patterns.
let x = 5;
let y = x;// y is just a copy of x since they are simple types and have a fixed size
let s1 = String::from("hello");
let s2 = s1; // s2 is a copy of the pointer to the data that s1 points to
// using s1 after this would error out because String does not have the Copy trait, which means
//we made a shallow copy instead of a deep copy. Rust does not like this
// if we tried to use s1. If we use s2 we are fine since s1 is invalidated
//after we assign s1's value to s2. This operation is called a move.
// println!("{}", s2);
let s1 = String::from("hello");
let s2 = s1.clone(); // This creates a deep copy of s1. We can now use s1 in other places without
// it being invalid.
// println!("{}",s1);
//Info about what things that make a deep copy when you do let x = something; let y = x;
// Rust has a special annotation called the Copy trait that we can place on types like integers that are
// stored on the stack (we’ll talk more about traits in Chapter 10). If a type has the Copy trait, an older
// variable is still usable after assignment. Rust won’t let us annotate a type with the Copy trait if the
// type, or any of its parts, has implemented the Drop trait. If the type needs something special to happen
// when the value goes out of scope and we add the Copy annotation to that type, we’ll get a compile time error.
// To learn about how to add the Copy annotation to your type, see Appendix C on Derivable Traits.
// So what types are Copy? You can check the documentation for the given type to be sure, but as a general rule,
// any group of simple scalar values can be Copy, and nothing that requires allocation or is some form of resource
// is Copy. Here are some of the types that are Copy:
// All the integer types, like u32.
// The boolean type, bool, with values true and false.
// All the floating point types, like f64.
// Tuples, but only if they contain types that are also Copy. (i32, i32) is Copy, but (i32, String) is not.
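// For instance (a small sketch): a tuple of two i32 values is Copy, so the
// original binding stays usable after the assignment.
// let t1 = (1i32, 2i32);
// let t2 = t1;
// println!("{:?} {:?}", t1, t2); // both bindings are still valid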
let s = String::from("hello"); // s comes into scope.
//So in Rust if we pass a variable into a function it loses its ownership to the
//function. Then once the function is over that variable no longer exists
//because it is now out of scope.
takes_ownership(s); // s's value moves into the function...
//... and so is no longer valid here.
let x = 5; // x comes into scope.
//If a variable has the copy trait then only a copy is made to the function and
//we can still use the variable afterwards even though all the variables in the
//function are now out of scope.
makes_copy(x); // x would move into the function,
// but i32 is Copy, so it’s okay to still
// use x afterward.
//we can give ownership of a variable from a function by having an expression at the end.
//We could pass in a variable and then take back its ownership by doing this. However, I think this
//is kind of a pain. The people at Rust feel the same.
let s1 = gives_ownership();
//Rust also lets us return multiple values as a tuple, which we can then destructure when
//we get the returned values.
//Now it's time to go over references and borrowing!
let s1 = String::from("hello");
//The & creates a reference to a variable. It can be thought of as a pointer to the original data.
//By doing this we do not pass ownership of the variable to the function
//Therefore when we go out of scope of the function we still have ownership of the variable
//where the function call was made.
//References as function parameters is called borrowing.
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
//We can not modify a borrowed variable.
//change(&s1);
let mut s1 = String::from("hello");
//We can fix this by making a mutable reference
//We also need to make sure that our variable we're passing in is also mutable.
change(&mut s1);
println!("{}", s1);
//You are only allowed one mutable reference to a particular piece of data in a particular scope.
//This ensures that we don't have any aliasing with our references referring to the same data.
//The benefit of having this restriction is that Rust can prevent data races at compile time.
//From the rust book
//Whew! We also cannot have a mutable reference while we have an immutable one.
//Users of an immutable reference don’t expect the values to suddenly change out from under them!
//However, multiple immutable references are okay because no one who is just reading the data has
//the ability to affect anyone else’s reading of the data.
//let mut s = String::from("Hello");
//let r1 = &s; //Immutable reference
//let r2 = &s; //Immutable reference
//let r3 = &s; //Mutable reference -- big no no
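// As a sketch of what *is* allowed (on compilers with non-lexical lifetimes,
// where a borrow ends after its last use):
// let mut s = String::from("Hello");
// let r1 = &s; // immutable reference
// let r2 = &s; // another immutable reference -- fine
// println!("{} {}", r1, r2); // r1 and r2 are not used past this point
// let r3 = &mut s; // so this mutable reference is accepted
// r3.push_str(" there");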
//The compiler does not allow dangling pointers/references. It will therefore error out on us.
// let refernece_to_nothing = dangle();
//We are now going to go over slices.
//From the rust book: Another data type that does not have ownership is the slice.
//Slices let you reference a contiguous sequence of elements in a collection rather than the whole collection.
// let mut s = String::from("hello world");
// let word = first_word(&s); // word will get the value 5.
// s.clear(); // This empties the String, making it equal to "".
// word still has the value 5 here, but there's no more string that
// we could meaningfully use the value 5 with. word is now totally invalid!
//The index we got is now completely out of sync with our original string.
//If we end up having more indices we could get even more out of sync with our data.
//For strings we can take advantage of a built in feature called string slices.
//They create a reference to portions of a string.
let s = String::from("hello world");
//Slicing is similar to slicing in Python where you have a starting index and
//an ending index that is one past the last element you actually care about.
let hello = &s[0..5];
// let hello = &s[..5]; //Equivalent to the above
let world = &s[6..11];
// let world = &s[6..]; //Equivalent to the above
let len = s.len();
let slice = &s[0..len];
// let slice = &s[..]; //Equivalent to the above
// We now have a straightforward API that’s much harder to mess up, since the compiler will
//ensure the references into the String remain valid. Remember the bug in the program in Listing 4-11,
//when we got the index to the end of the first word but then cleared the string so our index was invalid?
//That code was logically incorrect but didn’t show any immediate errors. The problems would show up later
//if we kept trying to use the first word index with an emptied string. Slices make this bug impossible
//and let us know we have a problem with our code much sooner. Using the slice version of first_word
//will throw a compile time error:
// let mut s = String::from("hello world");
// let word = first_word(&s);
// s.clear(); // Error!
// Recall from the borrowing rules that if we have an immutable reference to something, we cannot also
// take a mutable reference. Because clear needs to truncate the String, it tries to take a mutable reference,
// which fails. Not only has Rust made our API easier to use, but it has also eliminated an entire class of errors
// at compile time!
let s = "Hello, world!";
// The type of s here is &str: it’s a slice pointing to that specific point of the binary. This is also why string
// literals are immutable; &str is an immutable reference.
let my_string = String::from("hello world");
// first_word works on slices of `String`s
let word = first_word(&my_string[..]);
let my_string_literal = "hello world";
// first_word works on slices of string literals
let word = first_word(&my_string_literal[..]);
// since string literals *are* string slices already,
// this works too, without the slice syntax!
let word = first_word(my_string_literal);
let a = [1, 2, 3, 4, 5];
let slice = &a[1..3];
// This slice has the type &[i32]. It works the same way as string slices do, by storing a reference to the
// first element and a length. You’ll use this kind of slice for all sorts of other collections. We’ll discuss
// these collections in detail when we talk about vectors in Chapter 8
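// As a quick sketch, this slice behaves just like the string slices above:
// assert_eq!(slice, &[2, 3]);
// for v in slice {
//     println!("{}", v);
// }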
} // Here, x goes out of scope, then s. But since s's value was moved, nothing
// special happens.
fn takes_ownership(some_string: String) { // some_string comes into scope.
println!("{}", some_string);
} // Here, some_string goes out of scope and `drop` is called. The backing
// memory is freed.
fn makes_copy(some_integer: i32) { // some_integer comes into scope.
println!("{}", some_integer);
} // Here, some_integer goes out of scope. Nothing special happens.
//Tell what type the function will return
fn gives_ownership() -> String { // gives_ownership will move its
// return value into the function
// that calls it.
let some_string = String::from("hello"); // some_string comes into scope.
some_string // some_string is returned and
// moves out to the calling
// function.
}
fn calculate_length(s: &String) -> usize {
s.len()
}
//This fun | r on us since we are trying to
//modify a borrowed variable. We will always get an
//error for this function even if we never call it.
// fn change(some_string: &String) {
// some_string.push_str(", world");
// }
//This fixes the above code by making a mutable reference that we can now modify.
fn change(some_string: &mut String) {
some_string.push_str(", world");
}
//The below code creates a dangling pointer/reference.
//So when the data goes out of scope at the end of the function
//our reference now points to memory that has been freed.
//The compiler catches this and errors out on us.
// fn dangle() -> &String {
// let s = String::from("hello");
// &s
// }
//This version doesn't create slices of the data so things become out of index with each other
//We are going to rewrite it with a new version
// fn first_word(s: &String) -> usize {
// //We are converting our string into a byte
// let bytes = s.as_bytes();
// //We now iterate through the string using iter.
// //the enumerate function packages up each part of the
// //iterator as a tuple with an index and a reference to the value
// for (i, &item) in bytes.iter().enumerate() {
// //We check to see if the byte literal of the space is
// //equal to our item.
// //If it is then we return that index.
// if item == b' ' {
// return i;
// }
// }
// //If we don't run across a space at all then we return the length of the string.
// s.len()
// }
//We can change the following to the current function signature
// fn first_word(s: &String) -> &str {
//The new signature now allows us to operate on both Strings and str types
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0..i];
}
}
&s[..]
} | ction will erro | identifier_body |
main.rs | fn main() {
//Rust deals with stacks and heaps for memory management; there is no GC and no direct memory management
//The stack memory is a first-in, last-out queue
//Stack data must take up a known and fixed size
//In Rust the heap is used when we don't know the size of a value at compile time
//or if the memory to be allocated is dynamic
//Heap memory is not really organized; data just kinda gets thrown wherever the OS has space for it
//Therefore, the program has to jump around to get data, which can slow things down.
//Function local variables get pushed onto the stack and then popped off when
//the function is done
//Each value is assigned a variable which is its owner. Only one owner can exist at a time
//When the owner is out of scope the value disappears
//Examples to go over variable scope
let s = "hello";
{
let s = "hello2";
println!("s: {}", s);
}
println!("Previous s is out of scope but the one defined earlier isn't");
println!("s: {}", s);
//Onto the next example which goes over the rules of ownership
//It's going to be using the String type aka StringBuffers
let mut s = String::from("hello");
s.push_str(", world!");// s must be mutable for this to work
println!("{}", s);
//Note: In C++, this pattern of deallocating resources at the end of an item’s lifetime is sometimes
//called Resource Acquisition Is Initialization (RAII). The drop function in Rust will be familiar
//to you if you’ve used RAII patterns.
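// A small sketch of that RAII idea with a hypothetical type implementing Drop:
// struct Guard;
// impl Drop for Guard {
//     fn drop(&mut self) {
//         println!("resource released"); // runs when the value leaves scope
//     }
// }
// {
//     let _g = Guard;
// } // "resource released" is printed here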
let x = 5;
let y = x;// y is just a copy of x since they are simple types and have a fixed size
let s1 = String::from("hello");
let s2 = s1; // s2 is a copy of the pointer to the data that s1 points to
// using s1 after this would error out because String does not have the Copy trait, which means
//we made a shallow copy instead of a deep copy. Rust does not like this
// if we tried to use s1. If we use s2 we are fine since s1 is invalidated
//after we assign s1's value to s2. This operation is called a move.
// println!("{}", s2);
let s1 = String::from("hello");
let s2 = s1.clone(); // This creates a deep copy of s1. We can now use s1 in other places without
// it being invalid.
// println!("{}",s1);
//Info about what things that make a deep copy when you do let x = something; let y = x;
// Rust has a special annotation called the Copy trait that we can place on types like integers that are
// stored on the stack (we’ll talk more about traits in Chapter 10). If a type has the Copy trait, an older
// variable is still usable after assignment. Rust won’t let us annotate a type with the Copy trait if the
// type, or any of its parts, has implemented the Drop trait. If the type needs something special to happen
// when the value goes out of scope and we add the Copy annotation to that type, we’ll get a compile time error.
// To learn about how to add the Copy annotation to your type, see Appendix C on Derivable Traits.
// So what types are Copy? You can check the documentation for the given type to be sure, but as a general rule,
// any group of simple scalar values can be Copy, and nothing that requires allocation or is some form of resource
// is Copy. Here are some of the types that are Copy:
// All the integer types, like u32.
// The boolean type, bool, with values true and false.
// All the floating point types, like f64.
// Tuples, but only if they contain types that are also Copy. (i32, i32) is Copy, but (i32, String) is not.
let s = String::from("hello"); // s comes into scope.
//So in Rust if we pass a variable into a function it loses its ownership to the
//function. Then once the function is over that variable no longer exists
//because it is now out of scope.
takes_ownership(s); // s's value moves into the function...
//... and so is no longer valid here.
let x = 5; // x comes into scope.
//If a variable has the copy trait then only a copy is made to the function and
//we can still use the variable afterwards even though all the variables in the
//function are now out of scope.
makes_copy(x); // x would move into the function,
// but i32 is Copy, so it’s okay to still
// use x afterward.
//we can give ownership of a variable from a function by having an expression at the end.
//We could pass in a variable and then take back its ownership by doing this. However, I think this
//is kind of a pain. The people at Rust feel the same.
let s1 = gives_ownership();
//Rust also lets us return multiple values as a tuple, which we can then destructure when
//we get the returned values, as sketched below.
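// For instance (a hypothetical sketch of handing ownership back in a tuple):
// fn calculate_length_owned(s: String) -> (String, usize) {
//     let len = s.len();
//     (s, len) // return ownership together with the computed length
// }
// let (s2, len) = calculate_length_owned(String::from("hello"));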
//Now it's time to go over references and borrowing!
let s1 = String::from("hello");
//The & creates a reference to a variable. It can be thought of as a pointer to the original data.
//By doing this we do not pass ownership of the variable to the function
//Therefore when we go out of scope of the function we still have ownership of the variable
//where the function call was made.
//References as function parameters is called borrowing.
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
//We can not modify a borrowed variable.
//change(&s1);
let mut s1 = String::from("hello");
//We can fix this by making a mutable reference
//We also need to make sure that our variable we're passing in is also mutable.
change(&mut s1);
println!("{}", s1);
//You are only allowed one mutable reference to a particular piece of data in a particular scope.
//This ensures that we don't have any aliasing with our references referring to the same data.
//The benefit of having this restriction is that Rust can prevent data races at compile time.
//From the rust book
//Whew! We also cannot have a mutable reference while we have an immutable one.
//Users of an immutable reference don’t expect the values to suddenly change out from under them!
//However, multiple immutable references are okay because no one who is just reading the data has
//the ability to affect anyone else’s reading of the data.
//let mut s = String::from("Hello");
//let r1 = &s; //Immutable reference
//let r2 = &s; //Immutable reference
//let r3 = &s; //Mutable reference -- big no no
//The compiler does not allow dangling pointers/references. It will therefore error out on us.
// let refernece_to_nothing = dangle();
//We are now going to go over slices.
//From the rust book: Another data type that does not have ownership is the slice.
//Slices let you reference a contiguous sequence of elements in a collection rather than the whole collection.
// let mut s = String::from("hello world");
// let word = first_word(&s); // word will get the value 5.
// s.clear(); // This empties the String, making it equal to "".
// word still has the value 5 here, but there's no more string that
// we could meaningfully use the value 5 with. word is now totally invalid!
//The index we got is now completely out of sync with our original string.
//If we end up having more indices we could get even more out of sync with our data.
//For strings we can take advantage of a built in feature called string slices.
//They create a reference to portions of a string.
let s = String::from("hello world");
//Slicing is similar to slicing in Python where you have a starting index and
//an ending index that is one past the last element you actually care about.
let hello = &s[0..5];
// let hello = &s[..5]; //Equivalent to the above
let world = &s[6..11];
// let world = &s[6..]; //Equivalent to the above
let len = s.len();
let slice = &s[0..len];
// let slice = &s[..]; //Equivalent to the above
// We now have a straightforward API that’s much harder to mess up, since the compiler will
//ensure the references into the String remain valid. Remember the bug in the program in Listing 4-11,
//when we got the index to the end of the first word but then cleared the string so our index was invalid?
//That code was logically incorrect but didn’t show any immediate errors. The problems would show up later
//if we kept trying to use the first word index with an emptied string. Slices make this bug impossible
//and let us know we have a problem with our code much sooner. Using the slice version of first_word
//will throw a compile time error:
// let mut s = String::from("hello world");
// let word = first_word(&s);
// s.clear(); // Error!
// Recall from the borrowing rules that if we have an immutable reference to something, we cannot also
// take a mutable reference. Because clear needs to truncate the String, it tries to take a mutable reference,
// which fails. Not only has Rust made our API easier to use, but it has also eliminated an entire class of errors
// at compile time!
let s = "Hello, world!";
// The type of s here is &str: it’s a slice pointing to that specific point of the binary. This is also why string
// literals are immutable; &str is an immutable reference.
let my_string = String::from("hello world");
// first_word works on slices of `String`s
let word = first_word(&my_string[..]);
let my_string_literal = "hello world";
// first_word works on slices of string literals
let word = first_word(&my_string_literal[..]);
// since string literals *are* string slices already,
// this works too, without the slice syntax!
let word = first_word(my_string_literal);
let a = [1, 2, 3, 4, 5];
let slice = &a[1..3];
// This slice has the type &[i32]. It works the same way as string slices do, by storing a reference to the
// first element and a length. You’ll use this kind of slice for all sorts of other collections. We’ll discuss
// these collections in detail when we talk about vectors in Chapter 8
} // Here, x goes out of scope, then s. But since s's value was moved, nothing
// special happens.
fn takes_ownership(some_string: String) { // some_string comes into scope.
println!("{}", some_string);
} // Here, some_string goes out of scope and `drop` is called. The backing
// memory is freed.
fn makes_copy(some_integer: i32) { // some_integer comes into scope.
println!("{}", some_integer);
} // Here, some_integer goes out of scope. Nothing special happens.
//Tell what type the function will return
fn gives_ownership() -> String { // gives_ownership will move its
// return value into the function
// that calls it.
let some_string = String::from("hello"); // some_string comes into scope.
some_string // some_string is returned and
// moves out to the calling
// function.
}
fn calculate_length(s: &String) -> usize {
s.len()
}
//This function will error on us since we are trying to
//modify a borrowed variable. We will always get an
//error for this function even if we never call it.
// fn change(some_string: &String) {
// some_string.push_str(", world");
// }
//This fixes the above code by making a mutable reference that we can now modify.
fn change(some_string: &mut String) {
some_string.push_str(", world");
}
//The below code creates a dangling pointer/reference.
//So when the data goes out of scope at the end of the function
//our reference now points to memory that has been freed.
//The compiler catches this and errors out on us.
// fn dangle() -> &String {
// let s = String::from("hello");
// &s
// }
//This version doesn't create slices of the data so things become out of index with each other
//We are going to rewrite it with a new version
// fn first_word(s: &String) -> usize {
// //We are converting our string into a byte
// let bytes = s.as_bytes();
// //We now iterate through the string using iter.
// //the enumerate function packages up each part of the
// //iterator as a tuple with an index and a reference to the value
// for (i, &item) in bytes.iter().enumerate() {
// //We check to see if the byte literal of the space is
// //equal to our item.
// //If it is then we return that index.
// if item == b' ' {
// return i;
// }
// }
// //If we don't run across a space at all then we return the length of the string.
// s.len()
// }
//We can change the following to the current function signature
// fn first_word(s: &String) -> &str {
//The new signature now allows us to operate on both Strings and str types
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0. | .i];
}
}
&s[..]
} | conditional_block |